diff --git a/AiS_TX.grc b/AiS_TX.grc
index a4cbe0c..100e13f 100644
--- a/AiS_TX.grc
+++ b/AiS_TX.grc
@@ -1,1369 +1,639 @@
-
-
- Fri Nov 14 14:27:09 2014
-
- options
-
- id
- top_block
-
-
- _enabled
- True
-
-
- title
-
-
-
- author
-
-
-
- description
-
-
-
- window_size
- 1280, 1024
-
-
- generate_options
- wx_gui
-
-
- category
- Custom
-
-
- run_options
- prompt
-
-
- run
- True
-
-
- max_nouts
- 0
-
-
- realtime_scheduling
-
-
-
- _coordinate
- (-1, 0)
-
-
- _rotation
- 0
-
-
-
- analog_sig_source_x
-
- id
- analog_sig_source_x_0
-
-
- _enabled
- True
-
-
- type
- complex
-
-
- samp_rate
- samp_rate
-
-
- waveform
- analog.GR_SIN_WAVE
-
-
- freq
- -25000
-
-
- amp
- 1
-
-
- offset
- 0
-
-
- _coordinate
- (253, 184)
-
-
- _rotation
- 0
-
-
-
- blks2_selector
-
- id
- blks2_selector_0
-
-
- _enabled
- True
-
-
- type
- complex
-
-
- num_inputs
- 3
-
-
- num_outputs
- 1
-
-
- input_index
- channel_select
-
-
- output_index
- 0
-
-
- vlen
- 1
-
-
- _coordinate
- (953, 164)
-
-
- _rotation
- 0
-
-
-
- variable
-
- id
- samp_rate
-
-
- _enabled
- True
-
-
- value
- 326531
-
-
- _coordinate
- (173, 0)
-
-
- _rotation
- 0
-
-
-
- variable
-
- id
- bit_rate
-
-
- _enabled
- True
-
-
- value
- 9600
-
-
- _coordinate
- (281, -1)
-
-
- _rotation
- 0
-
-
-
- digital_gmsk_mod
-
- id
- digital_gmsk_mod_0
-
-
- _enabled
- True
-
-
- samples_per_symbol
- int(samp_rate/bit_rate)
-
-
- bt
- 0.4
-
-
- verbose
- False
-
-
- log
- False
-
-
- _coordinate
- (254, 103)
-
-
- _rotation
- 0
-
-
-
- blocks_multiply_xx
-
- id
- blocks_multiply_xx_0
-
-
- _enabled
- True
-
-
- type
- complex
-
-
- num_inputs
- 2
-
-
- vlen
- 1
-
-
- _coordinate
- (464, 148)
-
-
- _rotation
- 0
-
-
-
- blocks_multiply_const_vxx
-
- id
- blocks_multiply_const_vxx_0_1
-
-
- _enabled
- True
-
-
- type
- complex
-
-
- const
- 0.9
-
-
- vlen
- 1
-
-
- _coordinate
- (681, 160)
-
-
- _rotation
- 0
-
-
-
- blocks_multiply_const_vxx
-
- id
- blocks_multiply_const_vxx_0_1_0
-
-
- _enabled
- True
-
-
- type
- complex
-
-
- const
- 0.9
-
-
- vlen
- 1
-
-
- _coordinate
- (697, 376)
-
-
- _rotation
- 0
-
-
-
- blocks_add_xx
-
- id
- blocks_add_xx_0
-
-
- _enabled
- True
-
-
- type
- complex
-
-
- num_inputs
- 2
-
-
- vlen
- 1
-
-
- _coordinate
- (803, 212)
-
-
- _rotation
- 0
-
-
-
- blocks_multiply_const_vxx
-
- id
- blocks_multiply_const_vxx_0
-
-
- _enabled
- True
-
-
- type
- complex
-
-
- const
- 0.45
-
-
- vlen
- 1
-
-
- _coordinate
- (611, 208)
-
-
- _rotation
- 0
-
-
-
- blocks_multiply_const_vxx
-
- id
- blocks_multiply_const_vxx_0_0
-
-
- _enabled
- True
-
-
- type
- complex
-
-
- const
- 0.45
-
-
- vlen
- 1
-
-
- _coordinate
- (605, 301)
-
-
- _rotation
- 0
-
-
-
- blocks_multiply_xx
-
- id
- blocks_multiply_xx_0_0
-
-
- _enabled
- True
-
-
- type
- complex
-
-
- num_inputs
- 2
-
-
- vlen
- 1
-
-
- _coordinate
- (458, 364)
-
-
- _rotation
- 0
-
-
-
- digital_gmsk_mod
-
- id
- digital_gmsk_mod_0_0
-
-
- _enabled
- True
-
-
- samples_per_symbol
- int(samp_rate/bit_rate)
-
-
- bt
- 0.4
-
-
- verbose
- False
-
-
- log
- False
-
-
- _coordinate
- (249, 321)
-
-
- _rotation
- 0
-
-
-
- analog_sig_source_x
-
- id
- analog_sig_source_x_0_0
-
-
- _enabled
- True
-
-
- type
- complex
-
-
- samp_rate
- samp_rate
-
-
- waveform
- analog.GR_SIN_WAVE
-
-
- freq
- 25000
-
-
- amp
- 1
-
-
- offset
- 0
-
-
- _coordinate
- (250, 401)
-
-
- _rotation
- 0
-
-
-
- variable
-
- id
- channel_select
-
-
- _enabled
- True
-
-
- value
- 2
-
-
- _coordinate
- (368, -1)
-
-
- _rotation
- 0
-
-
-
- uhd_usrp_sink
-
- id
- uhd_usrp_sink_0
-
-
- _enabled
- True
-
-
- type
- fc32
-
-
- otw
-
-
-
- stream_args
-
-
-
- dev_addr
-
-
-
- sync
-
-
-
- clock_rate
- 0.0
-
-
- num_mboards
- 1
-
-
- clock_source0
-
-
-
- time_source0
-
-
-
- sd_spec0
-
-
-
- clock_source1
-
-
-
- time_source1
- gpsdo
-
-
- sd_spec1
-
-
-
- clock_source2
-
-
-
- time_source2
-
-
-
- sd_spec2
-
-
-
- clock_source3
-
-
-
- time_source3
-
-
-
- sd_spec3
-
-
-
- clock_source4
-
-
-
- time_source4
-
-
-
- sd_spec4
-
-
-
- clock_source5
-
-
-
- time_source5
-
-
-
- sd_spec5
-
-
-
- clock_source6
-
-
-
- time_source6
-
-
-
- sd_spec6
-
-
-
- clock_source7
-
-
-
- time_source7
-
-
-
- sd_spec7
-
-
-
- nchan
- 1
-
-
- samp_rate
- samp_rate
-
-
- center_freq0
- uhd.tune_request_t(162000000, 19000000)
-
-
- gain0
- 0
-
-
- ant0
- TX/RX
-
-
- bw0
- 0
-
-
- center_freq1
- 0
-
-
- gain1
- 0
-
-
- ant1
-
-
-
- bw1
- 0
-
-
- center_freq2
- 0
-
-
- gain2
- 0
-
-
- ant2
-
-
-
- bw2
- 0
-
-
- center_freq3
- 0
-
-
- gain3
- 0
-
-
- ant3
-
-
-
- bw3
- 0
-
-
- center_freq4
- 0
-
-
- gain4
- 0
-
-
- ant4
-
-
-
- bw4
- 0
-
-
- center_freq5
- 0
-
-
- gain5
- 0
-
-
- ant5
-
-
-
- bw5
- 0
-
-
- center_freq6
- 0
-
-
- gain6
- 0
-
-
- ant6
-
-
-
- bw6
- 0
-
-
- center_freq7
- 0
-
-
- gain7
- 0
-
-
- ant7
-
-
-
- bw7
- 0
-
-
- center_freq8
- 0
-
-
- gain8
- 0
-
-
- ant8
-
-
-
- bw8
- 0
-
-
- center_freq9
- 0
-
-
- gain9
- 0
-
-
- ant9
-
-
-
- bw9
- 0
-
-
- center_freq10
- 0
-
-
- gain10
- 0
-
-
- ant10
-
-
-
- bw10
- 0
-
-
- center_freq11
- 0
-
-
- gain11
- 0
-
-
- ant11
-
-
-
- bw11
- 0
-
-
- center_freq12
- 0
-
-
- gain12
- 0
-
-
- ant12
-
-
-
- bw12
- 0
-
-
- center_freq13
- 0
-
-
- gain13
- 0
-
-
- ant13
-
-
-
- bw13
- 0
-
-
- center_freq14
- 0
-
-
- gain14
- 0
-
-
- ant14
-
-
-
- bw14
- 0
-
-
- center_freq15
- 0
-
-
- gain15
- 0
-
-
- ant15
-
-
-
- bw15
- 0
-
-
- center_freq16
- 0
-
-
- gain16
- 0
-
-
- ant16
-
-
-
- bw16
- 0
-
-
- center_freq17
- 0
-
-
- gain17
- 0
-
-
- ant17
-
-
-
- bw17
- 0
-
-
- center_freq18
- 0
-
-
- gain18
- 0
-
-
- ant18
-
-
-
- bw18
- 0
-
-
- center_freq19
- 0
-
-
- gain19
- 0
-
-
- ant19
-
-
-
- bw19
- 0
-
-
- center_freq20
- 0
-
-
- gain20
- 0
-
-
- ant20
-
-
-
- bw20
- 0
-
-
- center_freq21
- 0
-
-
- gain21
- 0
-
-
- ant21
-
-
-
- bw21
- 0
-
-
- center_freq22
- 0
-
-
- gain22
- 0
-
-
- ant22
-
-
-
- bw22
- 0
-
-
- center_freq23
- 0
-
-
- gain23
- 0
-
-
- ant23
-
-
-
- bw23
- 0
-
-
- center_freq24
- 0
-
-
- gain24
- 0
-
-
- ant24
-
-
-
- bw24
- 0
-
-
- center_freq25
- 0
-
-
- gain25
- 0
-
-
- ant25
-
-
-
- bw25
- 0
-
-
- center_freq26
- 0
-
-
- gain26
- 0
-
-
- ant26
-
-
-
- bw26
- 0
-
-
- center_freq27
- 0
-
-
- gain27
- 0
-
-
- ant27
-
-
-
- bw27
- 0
-
-
- center_freq28
- 0
-
-
- gain28
- 0
-
-
- ant28
-
-
-
- bw28
- 0
-
-
- center_freq29
- 0
-
-
- gain29
- 0
-
-
- ant29
-
-
-
- bw29
- 0
-
-
- center_freq30
- 0
-
-
- gain30
- 0
-
-
- ant30
-
-
-
- bw30
- 0
-
-
- center_freq31
- 0
-
-
- gain31
- 0
-
-
- ant31
-
-
-
- bw31
- 0
-
-
- _coordinate
- (932, 305)
-
-
- _rotation
- 0
-
-
-
- AISTX_Build_Frame
-
- id
- AISTX_Build_Frame_0
-
-
- _enabled
- True
-
-
- sentence
- 000100000001101100011001110111011011110000000000000000000000011000111100111100100000101100100000101101000110011010001010010100010000000001000000000000000000000000000000
-
-
- repeat
- True
-
-
- enable_NRZI
- True
-
-
- _coordinate
- (0, 97)
-
-
- _rotation
- 0
-
-
-
- AISTX_Build_Frame
-
- id
- AISTX_Build_Frame_1
-
-
- _enabled
- True
-
-
- sentence
- 010100000001101100011001110111011011110000000000000001011110000000000000
-
-
- repeat
- True
-
-
- enable_NRZI
- True
-
-
- _coordinate
- (1, 317)
-
-
- _rotation
- 0
-
-
-
- digital_gmsk_mod_0
- blocks_multiply_xx_0
- 0
- 0
-
-
- analog_sig_source_x_0
- blocks_multiply_xx_0
- 0
- 1
-
-
- analog_sig_source_x_0_0
- blocks_multiply_xx_0_0
- 0
- 1
-
-
- digital_gmsk_mod_0_0
- blocks_multiply_xx_0_0
- 0
- 0
-
-
- blocks_multiply_xx_0_0
- blocks_multiply_const_vxx_0_0
- 0
- 0
-
-
- blocks_multiply_xx_0
- blocks_multiply_const_vxx_0_1
- 0
- 0
-
-
- blocks_multiply_xx_0
- blocks_multiply_const_vxx_0
- 0
- 0
-
-
- blks2_selector_0
- uhd_usrp_sink_0
- 0
- 0
-
-
- blocks_add_xx_0
- blks2_selector_0
- 0
- 2
-
-
- blocks_multiply_const_vxx_0_1
- blks2_selector_0
- 0
- 0
-
-
- blocks_multiply_const_vxx_0_1_0
- blks2_selector_0
- 0
- 1
-
-
- blocks_multiply_xx_0_0
- blocks_multiply_const_vxx_0_1_0
- 0
- 0
-
-
- blocks_multiply_const_vxx_0
- blocks_add_xx_0
- 0
- 0
-
-
- blocks_multiply_const_vxx_0_0
- blocks_add_xx_0
- 0
- 1
-
-
- AISTX_Build_Frame_0
- digital_gmsk_mod_0
- 0
- 0
-
-
- AISTX_Build_Frame_1
- digital_gmsk_mod_0_0
- 0
- 0
-
-
+options:
+ parameters:
+ author: ''
+ catch_exceptions: 'True'
+ category: Custom
+ cmake_opt: ''
+ comment: ''
+ copyright: ''
+ description: ''
+ gen_cmake: 'On'
+ gen_linking: dynamic
+ generate_options: qt_gui
+ hier_block_src_path: '.:'
+ id: top_block
+ max_nouts: '0'
+ output_language: python
+ placement: (0,0)
+ qt_qss_theme: ''
+ realtime_scheduling: ''
+ run: 'True'
+ run_command: '{python} -u {filename}'
+ run_options: prompt
+ sizing_mode: fixed
+ thread_safe_setters: ''
+ title: ''
+ window_size: 1280, 1024
+ states:
+ bus_sink: false
+ bus_source: false
+ bus_structure: null
+ coordinate: [-1, 0]
+ rotation: 0
+ state: enabled
+
+blocks:
+- name: bit_rate
+ id: variable
+ parameters:
+ comment: ''
+ value: '9600'
+ states:
+ bus_sink: false
+ bus_source: false
+ bus_structure: null
+ coordinate: [281, -1]
+ rotation: 0
+ state: enabled
+- name: channel_select
+ id: variable
+ parameters:
+ comment: ''
+ value: '2'
+ states:
+ bus_sink: false
+ bus_source: false
+ bus_structure: null
+ coordinate: [368, -1]
+ rotation: 0
+ state: enabled
+- name: samp_rate
+ id: variable
+ parameters:
+ comment: ''
+ value: '326531'
+ states:
+ bus_sink: false
+ bus_source: false
+ bus_structure: null
+ coordinate: [173, 0]
+ rotation: 0
+ state: enabled
+- name: AISTX_Build_Frame_0
+ id: AISTX_Build_Frame
+ parameters:
+ affinity: ''
+ alias: ''
+ comment: ''
+ enable_NRZI: 'True'
+ maxoutbuf: '0'
+ minoutbuf: '0'
+ repeat: 'True'
+ sentence: '000100000001101100011001110111011011110000000000000000000000011000111100111100100000101100100000101101000110011010001010010100010000000001000000000000000000000000000000'
+ states:
+ bus_sink: false
+ bus_source: false
+ bus_structure: null
+ coordinate: [0, 97]
+ rotation: 0
+ state: enabled
+- name: AISTX_Build_Frame_1
+ id: AISTX_Build_Frame
+ parameters:
+ affinity: ''
+ alias: ''
+ comment: ''
+ enable_NRZI: 'True'
+ maxoutbuf: '0'
+ minoutbuf: '0'
+ repeat: 'True'
+ sentence: '010100000001101100011001110111011011110000000000000001011110000000000000'
+ states:
+ bus_sink: false
+ bus_source: false
+ bus_structure: null
+ coordinate: [1, 317]
+ rotation: 0
+ state: enabled
+- name: analog_sig_source_x_0
+ id: analog_sig_source_x
+ parameters:
+ affinity: ''
+ alias: ''
+ amp: '1'
+ comment: ''
+ freq: '-25000'
+ maxoutbuf: '0'
+ minoutbuf: '0'
+ offset: '0'
+ phase: '0'
+ samp_rate: samp_rate
+ type: complex
+ waveform: analog.GR_SIN_WAVE
+ states:
+ bus_sink: false
+ bus_source: false
+ bus_structure: null
+ coordinate: [253, 184]
+ rotation: 0
+ state: enabled
+- name: analog_sig_source_x_0_0
+ id: analog_sig_source_x
+ parameters:
+ affinity: ''
+ alias: ''
+ amp: '1'
+ comment: ''
+ freq: '25000'
+ maxoutbuf: '0'
+ minoutbuf: '0'
+ offset: '0'
+ phase: '0'
+ samp_rate: samp_rate
+ type: complex
+ waveform: analog.GR_SIN_WAVE
+ states:
+ bus_sink: false
+ bus_source: false
+ bus_structure: null
+ coordinate: [250, 401]
+ rotation: 0
+ state: enabled
+- name: blocks_add_xx_0
+ id: blocks_add_xx
+ parameters:
+ affinity: ''
+ alias: ''
+ comment: ''
+ maxoutbuf: '0'
+ minoutbuf: '0'
+ num_inputs: '2'
+ type: complex
+ vlen: '1'
+ states:
+ bus_sink: false
+ bus_source: false
+ bus_structure: null
+ coordinate: [803, 212]
+ rotation: 0
+ state: enabled
+- name: blocks_multiply_const_vxx_0
+ id: blocks_multiply_const_vxx
+ parameters:
+ affinity: ''
+ alias: ''
+ comment: ''
+ const: '0.45'
+ maxoutbuf: '0'
+ minoutbuf: '0'
+ type: complex
+ vlen: '1'
+ states:
+ bus_sink: false
+ bus_source: false
+ bus_structure: null
+ coordinate: [611, 208]
+ rotation: 0
+ state: enabled
+- name: blocks_multiply_const_vxx_0_0
+ id: blocks_multiply_const_vxx
+ parameters:
+ affinity: ''
+ alias: ''
+ comment: ''
+ const: '0.45'
+ maxoutbuf: '0'
+ minoutbuf: '0'
+ type: complex
+ vlen: '1'
+ states:
+ bus_sink: false
+ bus_source: false
+ bus_structure: null
+ coordinate: [605, 301]
+ rotation: 0
+ state: enabled
+- name: blocks_multiply_const_vxx_0_1
+ id: blocks_multiply_const_vxx
+ parameters:
+ affinity: ''
+ alias: ''
+ comment: ''
+ const: '0.9'
+ maxoutbuf: '0'
+ minoutbuf: '0'
+ type: complex
+ vlen: '1'
+ states:
+ bus_sink: false
+ bus_source: false
+ bus_structure: null
+ coordinate: [681, 160]
+ rotation: 0
+ state: enabled
+- name: blocks_multiply_const_vxx_0_1_0
+ id: blocks_multiply_const_vxx
+ parameters:
+ affinity: ''
+ alias: ''
+ comment: ''
+ const: '0.9'
+ maxoutbuf: '0'
+ minoutbuf: '0'
+ type: complex
+ vlen: '1'
+ states:
+ bus_sink: false
+ bus_source: false
+ bus_structure: null
+ coordinate: [697, 376]
+ rotation: 0
+ state: enabled
+- name: blocks_multiply_xx_0
+ id: blocks_multiply_xx
+ parameters:
+ affinity: ''
+ alias: ''
+ comment: ''
+ maxoutbuf: '0'
+ minoutbuf: '0'
+ num_inputs: '2'
+ type: complex
+ vlen: '1'
+ states:
+ bus_sink: false
+ bus_source: false
+ bus_structure: null
+ coordinate: [464, 148]
+ rotation: 0
+ state: enabled
+- name: blocks_multiply_xx_0_0
+ id: blocks_multiply_xx
+ parameters:
+ affinity: ''
+ alias: ''
+ comment: ''
+ maxoutbuf: '0'
+ minoutbuf: '0'
+ num_inputs: '2'
+ type: complex
+ vlen: '1'
+ states:
+ bus_sink: false
+ bus_source: false
+ bus_structure: null
+ coordinate: [458, 364]
+ rotation: 0
+ state: enabled
+- name: blocks_selector_0
+ id: blocks_selector
+ parameters:
+ affinity: ''
+ alias: channel_select
+ comment: ''
+ enabled: 'True'
+ input_index: '0'
+ maxoutbuf: '0'
+ minoutbuf: '0'
+ num_inputs: '3'
+ num_outputs: '1'
+ output_index: '0'
+ showports: 'True'
+ type: complex
+ vlen: '1'
+ states:
+ bus_sink: false
+ bus_source: false
+ bus_structure: null
+ coordinate: [1088, 80.0]
+ rotation: 0
+ state: true
+- name: digital_gmsk_mod_0
+ id: digital_gmsk_mod
+ parameters:
+ affinity: ''
+ alias: ''
+ bt: '0.4'
+ comment: ''
+ do_unpack: 'True'
+ log: 'False'
+ maxoutbuf: '0'
+ minoutbuf: '0'
+ samples_per_symbol: int(samp_rate/bit_rate)
+ verbose: 'False'
+ states:
+ bus_sink: false
+ bus_source: false
+ bus_structure: null
+ coordinate: [254, 103]
+ rotation: 0
+ state: enabled
+- name: digital_gmsk_mod_0_0
+ id: digital_gmsk_mod
+ parameters:
+ affinity: ''
+ alias: ''
+ bt: '0.4'
+ comment: ''
+ do_unpack: 'True'
+ log: 'False'
+ maxoutbuf: '0'
+ minoutbuf: '0'
+ samples_per_symbol: int(samp_rate/bit_rate)
+ verbose: 'False'
+ states:
+ bus_sink: false
+ bus_source: false
+ bus_structure: null
+ coordinate: [249, 321]
+ rotation: 0
+ state: enabled
+- name: uhd_usrp_sink_0
+ id: uhd_usrp_sink
+ parameters:
+ affinity: ''
+ alias: ''
+ ant0: TX/RX
+ ant1: ''
+ ant10: ''
+ ant11: ''
+ ant12: ''
+ ant13: ''
+ ant14: ''
+ ant15: ''
+ ant16: ''
+ ant17: ''
+ ant18: ''
+ ant19: ''
+ ant2: ''
+ ant20: ''
+ ant21: ''
+ ant22: ''
+ ant23: ''
+ ant24: ''
+ ant25: ''
+ ant26: ''
+ ant27: ''
+ ant28: ''
+ ant29: ''
+ ant3: ''
+ ant30: ''
+ ant31: ''
+ ant4: ''
+ ant5: ''
+ ant6: ''
+ ant7: ''
+ ant8: ''
+ ant9: ''
+ bw0: '0'
+ bw1: '0'
+ bw10: '0'
+ bw11: '0'
+ bw12: '0'
+ bw13: '0'
+ bw14: '0'
+ bw15: '0'
+ bw16: '0'
+ bw17: '0'
+ bw18: '0'
+ bw19: '0'
+ bw2: '0'
+ bw20: '0'
+ bw21: '0'
+ bw22: '0'
+ bw23: '0'
+ bw24: '0'
+ bw25: '0'
+ bw26: '0'
+ bw27: '0'
+ bw28: '0'
+ bw29: '0'
+ bw3: '0'
+ bw30: '0'
+ bw31: '0'
+ bw4: '0'
+ bw5: '0'
+ bw6: '0'
+ bw7: '0'
+ bw8: '0'
+ bw9: '0'
+ center_freq0: uhd.tune_request_t(162000000, 19000000)
+ center_freq1: '0'
+ center_freq10: '0'
+ center_freq11: '0'
+ center_freq12: '0'
+ center_freq13: '0'
+ center_freq14: '0'
+ center_freq15: '0'
+ center_freq16: '0'
+ center_freq17: '0'
+ center_freq18: '0'
+ center_freq19: '0'
+ center_freq2: '0'
+ center_freq20: '0'
+ center_freq21: '0'
+ center_freq22: '0'
+ center_freq23: '0'
+ center_freq24: '0'
+ center_freq25: '0'
+ center_freq26: '0'
+ center_freq27: '0'
+ center_freq28: '0'
+ center_freq29: '0'
+ center_freq3: '0'
+ center_freq30: '0'
+ center_freq31: '0'
+ center_freq4: '0'
+ center_freq5: '0'
+ center_freq6: '0'
+ center_freq7: '0'
+ center_freq8: '0'
+ center_freq9: '0'
+ clock_rate: '0.0'
+ clock_source0: ''
+ clock_source1: ''
+ clock_source2: ''
+ clock_source3: ''
+ clock_source4: ''
+ clock_source5: ''
+ clock_source6: ''
+ clock_source7: ''
+ comment: ''
+ dev_addr: ''
+ dev_args: ''
+ gain0: '0'
+ gain1: '0'
+ gain10: '0'
+ gain11: '0'
+ gain12: '0'
+ gain13: '0'
+ gain14: '0'
+ gain15: '0'
+ gain16: '0'
+ gain17: '0'
+ gain18: '0'
+ gain19: '0'
+ gain2: '0'
+ gain20: '0'
+ gain21: '0'
+ gain22: '0'
+ gain23: '0'
+ gain24: '0'
+ gain25: '0'
+ gain26: '0'
+ gain27: '0'
+ gain28: '0'
+ gain29: '0'
+ gain3: '0'
+ gain30: '0'
+ gain31: '0'
+ gain4: '0'
+ gain5: '0'
+ gain6: '0'
+ gain7: '0'
+ gain8: '0'
+ gain9: '0'
+ gain_type0: default
+ gain_type1: default
+ gain_type10: default
+ gain_type11: default
+ gain_type12: default
+ gain_type13: default
+ gain_type14: default
+ gain_type15: default
+ gain_type16: default
+ gain_type17: default
+ gain_type18: default
+ gain_type19: default
+ gain_type2: default
+ gain_type20: default
+ gain_type21: default
+ gain_type22: default
+ gain_type23: default
+ gain_type24: default
+ gain_type25: default
+ gain_type26: default
+ gain_type27: default
+ gain_type28: default
+ gain_type29: default
+ gain_type3: default
+ gain_type30: default
+ gain_type31: default
+ gain_type4: default
+ gain_type5: default
+ gain_type6: default
+ gain_type7: default
+ gain_type8: default
+ gain_type9: default
+ len_tag_name: '""'
+ lo_export0: 'False'
+ lo_export1: 'False'
+ lo_export10: 'False'
+ lo_export11: 'False'
+ lo_export12: 'False'
+ lo_export13: 'False'
+ lo_export14: 'False'
+ lo_export15: 'False'
+ lo_export16: 'False'
+ lo_export17: 'False'
+ lo_export18: 'False'
+ lo_export19: 'False'
+ lo_export2: 'False'
+ lo_export20: 'False'
+ lo_export21: 'False'
+ lo_export22: 'False'
+ lo_export23: 'False'
+ lo_export24: 'False'
+ lo_export25: 'False'
+ lo_export26: 'False'
+ lo_export27: 'False'
+ lo_export28: 'False'
+ lo_export29: 'False'
+ lo_export3: 'False'
+ lo_export30: 'False'
+ lo_export31: 'False'
+ lo_export4: 'False'
+ lo_export5: 'False'
+ lo_export6: 'False'
+ lo_export7: 'False'
+ lo_export8: 'False'
+ lo_export9: 'False'
+ lo_source0: internal
+ lo_source1: internal
+ lo_source10: internal
+ lo_source11: internal
+ lo_source12: internal
+ lo_source13: internal
+ lo_source14: internal
+ lo_source15: internal
+ lo_source16: internal
+ lo_source17: internal
+ lo_source18: internal
+ lo_source19: internal
+ lo_source2: internal
+ lo_source20: internal
+ lo_source21: internal
+ lo_source22: internal
+ lo_source23: internal
+ lo_source24: internal
+ lo_source25: internal
+ lo_source26: internal
+ lo_source27: internal
+ lo_source28: internal
+ lo_source29: internal
+ lo_source3: internal
+ lo_source30: internal
+ lo_source31: internal
+ lo_source4: internal
+ lo_source5: internal
+ lo_source6: internal
+ lo_source7: internal
+ lo_source8: internal
+ lo_source9: internal
+ maxoutbuf: '0'
+ minoutbuf: '0'
+ nchan: '1'
+ num_mboards: '1'
+ otw: ''
+ samp_rate: samp_rate
+ sd_spec0: ''
+ sd_spec1: ''
+ sd_spec2: ''
+ sd_spec3: ''
+ sd_spec4: ''
+ sd_spec5: ''
+ sd_spec6: ''
+ sd_spec7: ''
+ show_lo_controls: 'False'
+ start_time: '-1.0'
+ stream_args: ''
+ stream_chans: '[]'
+ sync: sync
+ time_source0: ''
+ time_source1: gpsdo
+ time_source2: ''
+ time_source3: ''
+ time_source4: ''
+ time_source5: ''
+ time_source6: ''
+ time_source7: ''
+ type: fc32
+ states:
+ bus_sink: false
+ bus_source: false
+ bus_structure: null
+ coordinate: [932, 305]
+ rotation: 0
+ state: enabled
+
+connections:
+- [AISTX_Build_Frame_0, '0', digital_gmsk_mod_0, '0']
+- [AISTX_Build_Frame_1, '0', digital_gmsk_mod_0_0, '0']
+- [analog_sig_source_x_0, '0', blocks_multiply_xx_0, '1']
+- [analog_sig_source_x_0_0, '0', blocks_multiply_xx_0_0, '1']
+- [blocks_add_xx_0, '0', blocks_selector_0, '0']
+- [blocks_multiply_const_vxx_0, '0', blocks_add_xx_0, '0']
+- [blocks_multiply_const_vxx_0_0, '0', blocks_add_xx_0, '1']
+- [blocks_multiply_const_vxx_0_1, '0', blocks_selector_0, '1']
+- [blocks_multiply_const_vxx_0_1_0, '0', blocks_selector_0, '2']
+- [blocks_multiply_xx_0, '0', blocks_multiply_const_vxx_0, '0']
+- [blocks_multiply_xx_0, '0', blocks_multiply_const_vxx_0_1, '0']
+- [blocks_multiply_xx_0_0, '0', blocks_multiply_const_vxx_0_0, '0']
+- [blocks_multiply_xx_0_0, '0', blocks_multiply_const_vxx_0_1_0, '0']
+- [blocks_selector_0, '0', uhd_usrp_sink_0, '0']
+- [digital_gmsk_mod_0, '0', blocks_multiply_xx_0, '0']
+- [digital_gmsk_mod_0_0, '0', blocks_multiply_xx_0_0, '0']
+
+metadata:
+ file_format: 1
+ grc_version: v3.11.0.0git-215-g9a698313
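
The regenerated flowgraph keeps the original signal chain: each AIS frame is GMSK-modulated at int(samp_rate/bit_rate) = int(326531/9600) = 34 samples per symbol, shifted by a -25 kHz or +25 kHz complex tone so the two channels land on 161.975 MHz and 162.025 MHz around the 162.000 MHz USRP center frequency, scaled, and routed through the selector to the sink. Below is a minimal sketch of the channel-A branch using only stock GNU Radio 3.10 blocks; the AISTX Build_Frame block is replaced by a dummy byte source and the USRP sink by a null sink (both substitutions are assumptions for illustration), so it runs without gr-aistx or radio hardware.

    #!/usr/bin/env python3
    # Sketch of the channel-A branch of AiS_TX.grc, stock blocks only.
    import time
    from gnuradio import gr, blocks, digital, analog

    samp_rate = 326531                     # Hz, flowgraph variable
    bit_rate = 9600                        # AIS baud rate
    sps = int(samp_rate / bit_rate)        # 34 samples per symbol

    tb = gr.top_block()
    bits = blocks.vector_source_b([0x55] * 256, repeat=True)  # stand-in for AISTX Build_Frame
    mod = digital.gmsk_mod(samples_per_symbol=sps, bt=0.4)
    tone = analog.sig_source_c(samp_rate, analog.GR_SIN_WAVE, -25000, 1, 0)  # 162.000 MHz - 25 kHz = 161.975 MHz
    mixer = blocks.multiply_cc()
    scale = blocks.multiply_const_cc(0.9)
    sink = blocks.null_sink(gr.sizeof_gr_complex)             # a uhd.usrp_sink would go here

    tb.connect(bits, mod, (mixer, 0))
    tb.connect(tone, (mixer, 1))
    tb.connect(mixer, scale, sink)

    tb.start()                             # repeat=True source, so run briefly and stop
    time.sleep(0.5)
    tb.stop()
    tb.wait()
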
diff --git a/AiS_TX.py b/AiS_TX.py
deleted file mode 100755
index 5ea5deb..0000000
--- a/AiS_TX.py
+++ /dev/null
@@ -1,135 +0,0 @@
-#!/usr/bin/env python
-#
-# This script is part of the AIS BlackToolkit.
-# AiS_TX.py implements a software-based AIS transmitter accordingly to specifications (ITU-R M.1371-4).
-#
-# A fully functional GnuRadio installation is required, including our AIS Frame Builder block, namely gr-aistx.
-#
-# Tested on:
-# GnuRadio 3.6.5.1
-# Debian 7.1.0 wheezy
-# GNU C++ version 4.7.3; Boost_104900
-# UHD_003.005.003-0-unknown
-# Ettus USRP B100 Version 2)
-#
-# Copyright 2013-2014 -- Embyte & Pastus
-#
-# This program is free software; you can redistribute it and/or
-# modify it under the terms of the GNU General Public License
-# as published by the Free Software Foundation; either version 2
-# of the License, or (at your option) any later version.
-#
-# Usage example:
-# $ ./AIVDM_Encoder.py --type=1 --mmsi=970010000 --lat=45.6910 --long=9.7235 | xargs -IX ./AiS_TX.py --payload=X --channel=A
-#
-
-from gnuradio import blocks
-from gnuradio import digital
-from gnuradio import eng_notation
-from gnuradio import gr
-from gnuradio import uhd
-from gnuradio.eng_option import eng_option
-from gnuradio.gr import firdes
-from grc_gnuradio import wxgui as grc_wxgui
-from optparse import OptionParser
-import AISTX
-import time
-import wx
-
-class top_block(grc_wxgui.top_block_gui):
-
- def __init__(self, p, c, pw, ff, sr, br):
- grc_wxgui.top_block_gui.__init__(self, title="Top Block")
- _icon_path = "/usr/share/icons/hicolor/32x32/apps/gnuradio-grc.png"
- self.SetIcon(wx.Icon(_icon_path, wx.BITMAP_TYPE_ANY))
-
- ##################################################
- # Variables
- ##################################################
- self.samp_rate = samp_rate = sr
- self.channel_select = channel_select = c
- self.bit_rate = bit_rate = br
-
- ##################################################
- # Blocks
- ##################################################
- self.uhd_usrp_sink_0 = uhd.usrp_sink(
- device_addr="",
- stream_args=uhd.stream_args(
- cpu_format="fc32",
- channels=range(1),
- ),
- )
- self.uhd_usrp_sink_0.set_samp_rate(samp_rate)
- self.uhd_usrp_sink_0.set_center_freq(uhd.tune_request_t(161975000+50000*c,ff), 0)
- self.uhd_usrp_sink_0.set_gain(-10, 0)
- self.uhd_usrp_sink_0.set_antenna("TX/RX", 0)
- self.digital_gmsk_mod_0 = digital.gmsk_mod(
- samples_per_symbol=int(samp_rate/bit_rate),
- bt=0.4,
- verbose=False,
- log=False,
- )
- self.blocks_multiply_const_vxx_0 = blocks.multiply_const_vcc((0.9, ))
- self.AISTX_Build_Frame_0 = AISTX.Build_Frame(p, False, True)
-
- ##################################################
- # Connections
- ##################################################
- self.connect((self.AISTX_Build_Frame_0, 0), (self.digital_gmsk_mod_0, 0))
- self.connect((self.digital_gmsk_mod_0, 0), (self.blocks_multiply_const_vxx_0, 0))
- self.connect((self.blocks_multiply_const_vxx_0, 0), (self.uhd_usrp_sink_0, 0))
-
-
- def get_samp_rate(self):
- return self.samp_rate
-
- def set_samp_rate(self, samp_rate):
- self.samp_rate = samp_rate
- self.uhd_usrp_sink_0.set_samp_rate(self.samp_rate)
-
- def get_channel_select(self):
- return self.channel_select
-
- def set_channel_select(self, channel_select):
- self.channel_select = channel_select
- self.analog_sig_source_x_0.set_frequency(-25000+50000*self.channel_select)
-
- def get_bit_rate(self):
- return self.bit_rate
-
- def set_bit_rate(self, bit_rate):
- self.bit_rate = bit_rate
-
-if __name__ == '__main__':
-
- desc="""GnuRadio-Based AIS Transmitter. Copyright Embyte & Pastus 2013-2014."""
-
- parser = OptionParser(option_class=eng_option, usage="%prog: [options]", description=desc)
-
- parser.add_option("--payload", help="""Specify the message payload to transmit
- (e.g., crafted via AIVDM_Encoder)""")
- parser.add_option("--channel", help="""Specify the AIS channel:
- - A: 161.975Mhz (87B)
- - B: 162.025Mhz (88B)""")
- parser.add_option("--power", help="""Specify the transmisson power, between -12dB and +12dB (default is -10dB)""", type="int", default = -10)
- parser.add_option("--filter_frequency", help="""Specify the filter frequency (default is 19MHz)""", type="int", default = 19000000)
- parser.add_option("--sampling_rate", help="""Specify the sampling rate (default is 326.531KHz)""", type="int", default = 326531)
- parser.add_option("--bit_rate", help="""Specify the bit rate (default is 9600 baud)""", type="int", default = 9600)
-
- (options, args) = parser.parse_args()
-
- if not options.payload:
- parser.error("Payload not specified: -h for help.")
-
- if not options.channel:
- parser.error("Channel not specified: -h for help.")
-
- if options.channel!="A" and options.channel!="B":
- parser.error("Channel accepts value A or B: -h for help")
-
- channel_ID = 0 if options.channel=="A" else 1
-
- tb = top_block(p=options.payload, c=channel_ID, pw=options.power, ff=options.filter_frequency, sr=options.sampling_rate, br=options.bit_rate)
- tb.Run(True)
-
diff --git a/doc/wp-a-security-evaluation-of-ais.pdf b/doc/wp-a-security-evaluation-of-ais.pdf
new file mode 100644
index 0000000..66a791c
Binary files /dev/null and b/doc/wp-a-security-evaluation-of-ais.pdf differ
diff --git a/gr-aistx/.clang-format b/gr-aistx/.clang-format
new file mode 100644
index 0000000..3e4ddd4
--- /dev/null
+++ b/gr-aistx/.clang-format
@@ -0,0 +1,104 @@
+---
+Language: Cpp
+# BasedOnStyle: LLVM
+AccessModifierOffset: -4
+AlignAfterOpenBracket: Align
+AlignConsecutiveAssignments: false
+AlignConsecutiveDeclarations: false
+AlignEscapedNewlinesLeft: true
+AlignOperands: true
+AlignTrailingComments: true
+AllowAllParametersOfDeclarationOnNextLine: true
+AllowShortBlocksOnASingleLine: false
+AllowShortCaseLabelsOnASingleLine: false
+AllowShortFunctionsOnASingleLine: All
+AllowShortIfStatementsOnASingleLine: false
+AllowShortLoopsOnASingleLine: false
+AlwaysBreakAfterDefinitionReturnType: None
+AlwaysBreakAfterReturnType: None
+AlwaysBreakBeforeMultilineStrings: false
+AlwaysBreakTemplateDeclarations: true
+BinPackArguments: false
+BinPackParameters: false
+BreakBeforeBraces: Custom
+BraceWrapping:
+ AfterClass: true
+ AfterControlStatement: false
+ AfterEnum: false
+ AfterFunction: true
+ AfterNamespace: false
+ AfterObjCDeclaration: false
+ AfterStruct: false
+ AfterUnion: false
+ BeforeCatch: false
+ BeforeElse: false
+ IndentBraces: false
+BreakBeforeBinaryOperators: None
+BreakBeforeTernaryOperators: true
+BreakConstructorInitializersBeforeComma: false
+BreakAfterJavaFieldAnnotations: false
+BreakStringLiterals: true
+ColumnLimit: 90
+CommentPragmas: '^ IWYU pragma:'
+ConstructorInitializerAllOnOneLineOrOnePerLine: true
+ConstructorInitializerIndentWidth: 4
+ContinuationIndentWidth: 4
+Cpp11BracedListStyle: false
+DerivePointerAlignment: false
+DisableFormat: false
+ExperimentalAutoDetectBinPacking: false
+ForEachMacros:
+ - foreach
+ - Q_FOREACH
+ - BOOST_FOREACH
+IncludeCategories:
+ - Regex: '^"(gnuradio)/'
+ Priority: 1
+ - Regex: '^<(gnuradio)/'
+ Priority: 2
+ - Regex: '^<(boost)/'
+ Priority: 98
+ - Regex: '^<[a-z]*>$'
+ Priority: 99
+ - Regex: '^".*"$'
+ Priority: 0
+ - Regex: '.*'
+ Priority: 10
+
+IncludeIsMainRegex: '(Test)?$'
+IndentCaseLabels: false
+IndentWidth: 4
+IndentWrappedFunctionNames: false
+JavaScriptQuotes: Leave
+JavaScriptWrapImports: true
+KeepEmptyLinesAtTheStartOfBlocks: true
+MacroBlockBegin: ''
+MacroBlockEnd: ''
+MaxEmptyLinesToKeep: 2
+NamespaceIndentation: None
+ObjCBlockIndentWidth: 2
+ObjCSpaceAfterProperty: false
+ObjCSpaceBeforeProtocolList: true
+PenaltyBreakBeforeFirstCallParameter: 19
+PenaltyBreakComment: 300
+PenaltyBreakFirstLessLess: 120
+PenaltyBreakString: 1000
+PenaltyExcessCharacter: 1000000
+PenaltyReturnTypeOnItsOwnLine: 60
+PointerAlignment: Left
+ReflowComments: true
+SortIncludes: true
+SpaceAfterCStyleCast: false
+SpaceAfterTemplateKeyword: true
+SpaceBeforeAssignmentOperators: true
+SpaceBeforeParens: ControlStatements
+SpaceInEmptyParentheses: false
+SpacesBeforeTrailingComments: 1
+SpacesInAngles: false
+SpacesInContainerLiterals: true
+SpacesInCStyleCastParentheses: false
+SpacesInParentheses: false
+SpacesInSquareBrackets: false
+Standard: Cpp11
+TabWidth: 8
+UseTab: Never
diff --git a/gr-aistx/.conda/README.md b/gr-aistx/.conda/README.md
new file mode 100644
index 0000000..d0c0258
--- /dev/null
+++ b/gr-aistx/.conda/README.md
@@ -0,0 +1,110 @@
+# gr-aistx conda recipe
+
+This recipe is for creating a package that can be installed into a [conda](https://docs.conda.io/en/latest/) environment. See the [Conda GNU Radio Guide](https://wiki.gnuradio.org/index.php/CondaInstall) for more information on using GNU Radio with conda.
+
+Packages for GNU Radio and some out-of-tree (OOT) modules are available through the [`conda-forge` channel](https://conda-forge.org/). If this OOT module is already available (search "gnuradio" on [anaconda.org](https://anaconda.org)), it is preferable to use that existing package rather than this recipe.
+
+#### Users
+
+- [Building the package](#building-the-package)
+
+#### Developers
+
+- [Modifying the recipe](#modifying-the-recipe)
+- [Continuous integration](#continuous-integration)
+
+
+## Building the package
+
+(See the [Conda GNU Radio Guide](https://wiki.gnuradio.org/index.php/CondaInstall) if you are unfamiliar with how to use conda.)
+
+1. Make sure that you have `conda-build` and `conda-forge-pinning` installed and updated in your base environment:
+
+ conda activate base
+ conda install -n base conda-build conda-forge-pinning
+ conda upgrade -n base conda-build conda-forge-pinning
+
+ **Windows users only**: you will also need to have Microsoft's Visual C++ build tools installed. Usually you can do this by installing the [Community edition of Visual Studio](https://visualstudio.microsoft.com/free-developer-offers/) and then selecting a MSVC C++ x64/x86 build tools component under the list of "Individual Components". As of this writing, you will specifically need MSVC v141, i.e. the "MSVC v141 - VS2017 C++ x64/x86 build tools (v14.16)" component. If the build fails to find the version of MSVC it is looking for, try installing other (newer) versions.
+
+2. Download the source code for this OOT module (which includes this recipe). Typically, this is done by using `git` and cloning the module's repository:
+
+ git clone
+ cd
+
+3. Run `conda-build` on the recipe to create the package:
+
+ (Linux and macOS)
+
+ conda build .conda/recipe/ -m ${CONDA_PREFIX}/conda_build_config.yaml
+
+ (Windows)
+
+ conda build .conda\recipe\ -m %CONDA_PREFIX%\conda_build_config.yaml
+
+ If you plan on using this package within an existing environment which uses a specific version of Python, specify the version of Python using the `--python` flag. You must use a version string that matches one of the strings listed under `python` in the `${CONDA_PREFIX}/conda_build_config.yaml` file, e.g:
+
+ (Linux and macOS)
+
+ conda build .conda/recipe/ -m ${CONDA_PREFIX}/conda_build_config.yaml --python="3.9.* *_cpython"
+
+ (Windows)
+
+ conda build .conda\recipe\ -m %CONDA_PREFIX%\conda_build_config.yaml --python="3.9.* *_cpython"
+
+ If you encounter errors, consult with the OOT module maintainer or the maintainers of the [gnuradio feedstock](https://github.com/conda-forge/gnuradio-feedstock). It is possible that the recipe will need to be updated.
+
+4. Install the package into an existing environment
+
+ conda install --use-local -n gnuradio-EXAMPLE
+
+ or create a new environment that includes the package:
+
+ conda create -n test_env gnuradio-EXAMPLE
+
+
+## Modifying the recipe
+
+This recipe is derived from a template, and so it is best to check it and make any necessary modifications. Likely changes include:
+
+- Populating metadata near the bottom of the `recipe/meta.yaml` file
+- Adding "host" (build-time) and "run" (run-time) dependencies specific to your module in `recipe/meta.yaml`
+- Adding any special configuration flags or steps that are necessary to carry out the build to the build scripts (`recipe/build.sh` for Linux/macOS and `recipe/bld.bat` for Windows)
+
+Specifying the versions of GNU Radio that your OOT is compatible with is one of the most important modifications. Following the instructions below, the module will be built against the conda-forge "pinned" version of GNU Radio, which is usually the latest version.
+
+- To override the pinned version of GNU Radio (e.g. for a branch that builds against an older version), specify the `gnuradio_core` key as instructed in `recipe/conda_build_config.yaml`.
+- If the module is compatible with multiple major versions of GNU Radio, and you want to build against multiple of them, you can also add extra versions to `recipe/conda_build_config.yaml` to expand the default build matrix.
+
+See the [conda-build documentation](https://docs.conda.io/projects/conda-build/en/latest/index.html) for details on how to write a conda recipe.
+
+
+## Continuous integration
+
+Only a few steps are needed to use this recipe to build and test this OOT module using CI services. It can also be used to upload packages to [anaconda.org](https://anaconda.org) for others to download and use.
+
+1. Make sure that you have `conda-smithy` installed in your base conda environment:
+
+ conda activate base
+ conda install -n base conda-smithy
+ conda upgrade -n base conda-smithy
+
+2. Make any changes to the recipe and `conda-forge.yml` that are necessary. For example, if you plan on uploading packages to your own [anaconda.org](https://anaconda.org) channel, specify the channel name and label as the `channel_targets` key in `recipe/conda_build_config.yaml`. Commit the changes to your repository:
+
+ git commit -a
+
+3. "Re-render" the CI scripts by running conda-smithy from the root of your repository:
+
+ conda-smithy rerender --feedstock_config .conda/conda-forge.yml -c auto
+
+ This will create a commit that adds or updates the CI scripts that have been configured with `conda-forge.yml`. If you want to minimize extraneous files, you can remove some of the newly-created files that are not necessary outside of a typical conda-forge feedstock:
+
+ git rm -f .github/workflows/automerge.yml .github/workflows/webservices.yml .circleci/config.yml
+ git commit --amend -s
+
+ When the CI is executed (on a pull request or commit), it will run one job per configuration file in `.ci_support` to build packages for various platforms, Python versions, and optionally `gnuradio` versions (by adding to `gnuradio_extra_pin` in `recipe/conda_build_config.yaml`).
+
+ **You should repeat this step whenever the recipe is updated or when changes to the conda-forge infrastructure require all CI scripts to be updated.**
+
+ Since the newly created files will be rewritten whenever conda-smithy is run, you should not edit any of the automatically-generated files in e.g. `.ci_support`, `.scripts`, or `.github/workflows/conda-build.yml`.
+
+4. (optional) If you want to enable uploads of the packages to [anaconda.org](https://anaconda.org) whenever the CI is run from a commit on the branch specified in `conda-forge.yml`, you need to set an Anaconda Cloud API token to the `BINSTAR_TOKEN` environment variable. To generate a token, follow the instructions [here](https://docs.anaconda.com/anacondaorg/user-guide/tasks/work-with-accounts/#creating-access-tokens). To populate the `BINSTAR_TOKEN` environment variable for CI jobs, add the token as a secret by following, for example, the [Github docs](https://docs.github.com/en/actions/reference/encrypted-secrets).
diff --git a/gr-aistx/.conda/conda-forge.yml b/gr-aistx/.conda/conda-forge.yml
new file mode 100644
index 0000000..66c58f2
--- /dev/null
+++ b/gr-aistx/.conda/conda-forge.yml
@@ -0,0 +1,30 @@
+# See https://conda-forge.org/docs/maintainer/conda_forge_yml.html for
+# documentation on possible keys and values.
+
+# uncomment to enable cross-compiled osx-arm64 builds
+#build_platform:
+# osx_arm64: osx_64
+clone_depth: 0
+github_actions:
+ store_build_artifacts: true
+os_version:
+ linux_64: cos7
+provider:
+ linux: github_actions
+ osx: github_actions
+ win: github_actions
+ # uncomment to enable additional linux platforms
+ #linux_aarch64: github_actions
+ #linux_ppc64le: github_actions
+recipe_dir: .conda/recipe
+# skip unnecessary files since this is not a full-fledged conda-forge feedstock
+skip_render:
+ - README.md
+ - LICENSE.txt
+ - .gitattributes
+ - .gitignore
+ - build-locally.py
+ - LICENSE
+test: native_and_emulated
+# enable uploads to Anaconda Cloud from specified branches only
+upload_on_branch: main
diff --git a/gr-aistx/.conda/recipe/bld.bat b/gr-aistx/.conda/recipe/bld.bat
new file mode 100644
index 0000000..de4a3b7
--- /dev/null
+++ b/gr-aistx/.conda/recipe/bld.bat
@@ -0,0 +1,29 @@
+setlocal EnableDelayedExpansion
+@echo on
+
+:: Make a build folder and change to it
+cmake -E make_directory buildconda
+cd buildconda
+
+:: configure
+cmake -G "Ninja" ^
+ -DCMAKE_BUILD_TYPE:STRING=Release ^
+ -DCMAKE_INSTALL_PREFIX:PATH="%LIBRARY_PREFIX%" ^
+ -DCMAKE_PREFIX_PATH:PATH="%LIBRARY_PREFIX%" ^
+ -DGR_PYTHON_DIR:PATH="%SP_DIR%" ^
+ -DENABLE_DOXYGEN=OFF ^
+ -DENABLE_TESTING=ON ^
+ ..
+if errorlevel 1 exit 1
+
+:: build
+cmake --build . --config Release -- -j%CPU_COUNT%
+if errorlevel 1 exit 1
+
+:: install
+cmake --build . --config Release --target install
+if errorlevel 1 exit 1
+
+:: test
+ctest --build-config Release --output-on-failure --timeout 120 -j%CPU_COUNT%
+if errorlevel 1 exit 1
diff --git a/gr-aistx/.conda/recipe/build.sh b/gr-aistx/.conda/recipe/build.sh
new file mode 100644
index 0000000..2f3df70
--- /dev/null
+++ b/gr-aistx/.conda/recipe/build.sh
@@ -0,0 +1,22 @@
+#!/usr/bin/env bash
+
+set -ex
+
+cmake -E make_directory buildconda
+cd buildconda
+
+cmake_config_args=(
+ -DCMAKE_BUILD_TYPE=Release
+ -DCMAKE_INSTALL_PREFIX=$PREFIX
+ -DLIB_SUFFIX=""
+ -DENABLE_DOXYGEN=OFF
+ -DENABLE_TESTING=ON
+)
+
+cmake ${CMAKE_ARGS} -G "Ninja" .. "${cmake_config_args[@]}"
+cmake --build . --config Release -- -j${CPU_COUNT}
+cmake --build . --config Release --target install
+
+if [[ "${CONDA_BUILD_CROSS_COMPILATION:-}" != "1" || "${CROSSCOMPILING_EMULATOR}" != "" ]]; then
+ ctest --build-config Release --output-on-failure --timeout 120 -j${CPU_COUNT}
+fi
diff --git a/gr-aistx/.conda/recipe/conda_build_config.yaml b/gr-aistx/.conda/recipe/conda_build_config.yaml
new file mode 100644
index 0000000..ce9c0f6
--- /dev/null
+++ b/gr-aistx/.conda/recipe/conda_build_config.yaml
@@ -0,0 +1,14 @@
+# this is the channel and label where packages will be uploaded to if enabled
+# (see ../README.md)
+channel_targets:
+ - gnuradio main
+# override the conda-forge pin for gnuradio-core by uncommenting
+# and specifying a different version here
+#gnuradio_core:
+ #- "3.10.1"
+gnuradio_extra_pin:
+ # always leave one entry with the empty string
+ - ""
+  # add version strings here to get builds for versions other than
+  # the conda-forge-wide default or the version specified above for gnuradio_core
+ #- "3.9.5"
diff --git a/gr-aistx/.conda/recipe/meta.yaml b/gr-aistx/.conda/recipe/meta.yaml
new file mode 100644
index 0000000..0f2f4b8
--- /dev/null
+++ b/gr-aistx/.conda/recipe/meta.yaml
@@ -0,0 +1,88 @@
+{% set oot_name = "aistx" %}
+{% set name = "gnuradio-" + oot_name %}
+{% set version = (environ.get("GIT_DESCRIBE_TAG_PEP440", "0.0.0." + datetime.datetime.now().strftime("%Y%m%d") + ".dev+" + environ.get("GIT_DESCRIBE_HASH", "local"))|string) %}
+
+package:
+ name: {{ name|lower }}
+ version: {{ version }}
+
+source:
+ # use local path or git repository depending on if the build is local or done on CI
+ path: "../.." # [not os.environ.get("CI")]
+ git_url: {{ environ.get('FEEDSTOCK_ROOT', "../..") }} # [os.environ.get("CI")]
+
+build:
+ number: 0
+
+requirements:
+ build:
+ - {{ compiler("c") }}
+ - {{ compiler("cxx") }}
+ - cmake
+ - git
+ - ninja
+ - pkg-config
+ # cross-compilation requirements
+ - python # [build_platform != target_platform]
+ - cross-python_{{ target_platform }} # [build_platform != target_platform]
+ - numpy # [build_platform != target_platform]
+ - pybind11 # [build_platform != target_platform]
+ # Add extra build tool dependencies here
+
+ host:
+ - gmp # [linux]
+ # the following two entries are for generating builds against specific GR versions
+ - gnuradio-core # [not gnuradio_extra_pin]
+ - gnuradio-core {{ gnuradio_extra_pin }}.* # [gnuradio_extra_pin]
+ - pip # [win]
+ - pybind11
+ - python
+ - numpy
+ - volk
+ # Add/remove library dependencies here
+
+ run:
+ - numpy
+ - python
+ # Add/remove runtime dependencies here
+
+test:
+ commands:
+ # Add a list of commands to run to check that the package works. Examples below.
+
+ ## verify that commands run
+ #- COMMAND --help
+
+ # verify that (some) headers get installed
+ - test -f $PREFIX/include/gnuradio/{{ oot_name }}/api.h # [not win]
+ - if not exist %PREFIX%\\Library\\include\\gnuradio\\{{ oot_name }}\\api.h exit 1 # [win]
+
+ ## verify that libraries get installed
+ #- test -f $PREFIX/lib/lib{{ name }}${SHLIB_EXT} # [not win]
+ #- if not exist %PREFIX%\\Library\\bin\\{{ name }}.dll exit 1 # [win]
+ #- if not exist %PREFIX%\\Library\\lib\\{{ name }}.lib exit 1 # [win]
+
+ ## verify that (some) GRC blocks get installed
+ #{% set blocks = ["LIST", "OF", "GRC", "BLOCK", "NAMES"] %}
+ #{% for block in blocks %}
+ #- test -f $PREFIX/share/gnuradio/grc/blocks/{{ block }}.block.yml # [not win]
+ #- if not exist %PREFIX%\\Library\\share\\gnuradio\\grc\\blocks\\{{ block }}.block.yml exit 1 # [win]
+ #{% endfor %}
+
+ imports:
+ # verify that the python module imports
+ - gnuradio.{{ oot_name }}
+
+about:
+ # For licenses, use the SPDX identifier, e.g: "GPL-2.0-only" instead of
+ # "GNU General Public License version 2.0". See https://spdx.org/licenses/.
+ # Include the license text by using the license_file entry set to the path
+ # of the license text file within the source directory, e.g. "LICENSE".
+ # See https://docs.conda.io/projects/conda-build/en/latest/resources/define-metadata.html#license-file
+
+ #home: https://github.com//gr-aistx
+ #license: GPL-3.0-or-later
+ #license_file: LICENSE
+ #summary: GNU Radio aistx module
+ #description: >
+ # Short description of the gr-aistx module.
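
The recipe's test section only checks that api.h lands in the install prefix and that the Python module imports. A slightly more end-to-end smoke test after installing the package might look like the sketch below; the gnuradio.aistx import path matches the recipe's imports check, while the Build_Frame(sentence, repeat, enable_NRZI) factory name and signature are assumed from the old AISTX.Build_Frame call and the GRC block parameters.

    # Post-install smoke test (factory name/signature assumed, see note above).
    from gnuradio import aistx

    payload = "010100000001101100011001110111011011110000000000000001011110000000000000"
    frame_builder = aistx.Build_Frame(payload, False, True)   # sentence, repeat, enable_NRZI
    print(type(frame_builder))
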
diff --git a/gr-aistx/CMakeLists.txt b/gr-aistx/CMakeLists.txt
index 102755b..ea958e6 100644
--- a/gr-aistx/CMakeLists.txt
+++ b/gr-aistx/CMakeLists.txt
@@ -1,148 +1,148 @@
-# Copyright 2011,2012 Free Software Foundation, Inc.
+# Copyright 2011-2020 Free Software Foundation, Inc.
#
-# This file is part of GNU Radio
+# This file was generated by gr_modtool, a tool from the GNU Radio framework
+# This file is a part of gr-aistx
#
-# GNU Radio is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 3, or (at your option)
-# any later version.
+# SPDX-License-Identifier: GPL-3.0-or-later
#
-# GNU Radio is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with GNU Radio; see the file COPYING. If not, write to
-# the Free Software Foundation, Inc., 51 Franklin Street,
-# Boston, MA 02110-1301, USA.
+# Select the release build type by default to get optimization flags.
+# This has to come before project() which otherwise initializes it.
+# Build type can still be overridden by setting -DCMAKE_BUILD_TYPE=
+set(CMAKE_BUILD_TYPE "Release" CACHE STRING "")
########################################################################
# Project setup
########################################################################
-cmake_minimum_required(VERSION 2.6)
-project(gr-AISTX CXX C)
+cmake_minimum_required(VERSION 3.8)
+project(gr-aistx CXX C)
enable_testing()
-#select the release build type by default to get optimization flags
-if(NOT CMAKE_BUILD_TYPE)
- set(CMAKE_BUILD_TYPE "Release")
- message(STATUS "Build type not specified: defaulting to release.")
-endif(NOT CMAKE_BUILD_TYPE)
-set(CMAKE_BUILD_TYPE ${CMAKE_BUILD_TYPE} CACHE STRING "")
+# Install to PyBOMBS target prefix if defined
+if(DEFINED ENV{PYBOMBS_PREFIX})
+ set(CMAKE_INSTALL_PREFIX $ENV{PYBOMBS_PREFIX})
+ message(STATUS "PyBOMBS installed GNU Radio. Setting CMAKE_INSTALL_PREFIX to $ENV{PYBOMBS_PREFIX}")
+endif()
+
+# Make sure our local CMake Modules path comes first
+list(INSERT CMAKE_MODULE_PATH 0 ${PROJECT_SOURCE_DIR}/cmake/Modules)
+# Find gnuradio to get access to the cmake modules
+find_package(Gnuradio "3.10" REQUIRED)
+
+# Set the version information here
+set(VERSION_MAJOR 1)
+set(VERSION_API 0)
+set(VERSION_ABI 0)
+set(VERSION_PATCH 0)
+
+cmake_policy(SET CMP0011 NEW)
-list(APPEND CMAKE_MODULE_PATH ${CMAKE_SOURCE_DIR}/cmake/Modules)
+# Enable generation of compile_commands.json for code completion engines
+set(CMAKE_EXPORT_COMPILE_COMMANDS ON)
########################################################################
-# Compiler specific setup
+# Minimum Version Requirements
########################################################################
-if(CMAKE_COMPILER_IS_GNUCXX AND NOT WIN32)
- #http://gcc.gnu.org/wiki/Visibility
- add_definitions(-fvisibility=hidden)
-endif()
+
+include(GrMinReq)
########################################################################
-# Find boost
+# Compiler settings
########################################################################
-if(UNIX AND EXISTS "/usr/lib64")
- list(APPEND BOOST_LIBRARYDIR "/usr/lib64") #fedora 64-bit fix
-endif(UNIX AND EXISTS "/usr/lib64")
-set(Boost_ADDITIONAL_VERSIONS
- "1.35.0" "1.35" "1.36.0" "1.36" "1.37.0" "1.37" "1.38.0" "1.38" "1.39.0" "1.39"
- "1.40.0" "1.40" "1.41.0" "1.41" "1.42.0" "1.42" "1.43.0" "1.43" "1.44.0" "1.44"
- "1.45.0" "1.45" "1.46.0" "1.46" "1.47.0" "1.47" "1.48.0" "1.48" "1.49.0" "1.49"
- "1.50.0" "1.50" "1.51.0" "1.51" "1.52.0" "1.52" "1.53.0" "1.53" "1.54.0" "1.54"
- "1.55.0" "1.55" "1.56.0" "1.56" "1.57.0" "1.57" "1.58.0" "1.58" "1.59.0" "1.59"
- "1.60.0" "1.60" "1.61.0" "1.61" "1.62.0" "1.62" "1.63.0" "1.63" "1.64.0" "1.64"
- "1.65.0" "1.65" "1.66.0" "1.66" "1.67.0" "1.67" "1.68.0" "1.68" "1.69.0" "1.69"
-)
-find_package(Boost "1.35" COMPONENTS filesystem system)
-if(NOT Boost_FOUND)
- message(FATAL_ERROR "Boost required to compile AISTX")
-endif()
+include(GrCompilerSettings)
########################################################################
# Install directories
########################################################################
+include(GrVersion)
+
include(GrPlatform) #define LIB_SUFFIX
-set(GR_RUNTIME_DIR bin)
-set(GR_LIBRARY_DIR lib${LIB_SUFFIX})
-set(GR_INCLUDE_DIR include/AISTX)
-set(GR_DATA_DIR share)
+
+if(NOT CMAKE_MODULES_DIR)
+ set(CMAKE_MODULES_DIR lib${LIB_SUFFIX}/cmake)
+endif(NOT CMAKE_MODULES_DIR)
+
+set(GR_INCLUDE_DIR include/gnuradio/aistx)
+set(GR_CMAKE_DIR ${CMAKE_MODULES_DIR}/gnuradio-aistx)
set(GR_PKG_DATA_DIR ${GR_DATA_DIR}/${CMAKE_PROJECT_NAME})
-set(GR_DOC_DIR ${GR_DATA_DIR}/doc)
set(GR_PKG_DOC_DIR ${GR_DOC_DIR}/${CMAKE_PROJECT_NAME})
-set(GR_CONF_DIR etc)
set(GR_PKG_CONF_DIR ${GR_CONF_DIR}/${CMAKE_PROJECT_NAME}/conf.d)
-set(GR_LIBEXEC_DIR libexec)
set(GR_PKG_LIBEXEC_DIR ${GR_LIBEXEC_DIR}/${CMAKE_PROJECT_NAME})
-set(GRC_BLOCKS_DIR ${GR_PKG_DATA_DIR}/grc/blocks)
########################################################################
-# Find gnuradio build dependencies
+# On Apple only, set install name and use rpath correctly, if not already set
########################################################################
-find_package(GnuradioRuntime)
-find_package(CppUnit)
-
-# To run a more advanced search for GNU Radio and it's components and
-# versions, use the following. Add any components required to the list
-# of GR_REQUIRED_COMPONENTS (in all caps) and change "version" to the
-# minimum API compatible version required.
-#
-# set(GR_REQUIRED_COMPONENTS CORE BLOCKS FILTER ...)
-# find_package(Gnuradio "version")
-
-
-if(NOT GNURADIO_RUNTIME_FOUND)
- message(FATAL_ERROR "GnuRadio Runtime required to compile AISTX")
-endif()
-
-if(NOT CPPUNIT_FOUND)
- message(FATAL_ERROR "CppUnit required to compile AISTX")
-endif()
+if(APPLE)
+ if(NOT CMAKE_INSTALL_NAME_DIR)
+ set(CMAKE_INSTALL_NAME_DIR
+ ${CMAKE_INSTALL_PREFIX}/${GR_LIBRARY_DIR} CACHE
+ PATH "Library Install Name Destination Directory" FORCE)
+ endif(NOT CMAKE_INSTALL_NAME_DIR)
+ if(NOT CMAKE_INSTALL_RPATH)
+ set(CMAKE_INSTALL_RPATH
+ ${CMAKE_INSTALL_PREFIX}/${GR_LIBRARY_DIR} CACHE
+ PATH "Library Install RPath" FORCE)
+ endif(NOT CMAKE_INSTALL_RPATH)
+ if(NOT CMAKE_BUILD_WITH_INSTALL_RPATH)
+ set(CMAKE_BUILD_WITH_INSTALL_RPATH ON CACHE
+ BOOL "Do Build Using Library Install RPath" FORCE)
+ endif(NOT CMAKE_BUILD_WITH_INSTALL_RPATH)
+endif(APPLE)
########################################################################
-# Setup the include and linker paths
+# Find gnuradio build dependencies
########################################################################
-include_directories(
- ${CMAKE_SOURCE_DIR}/include
- ${Boost_INCLUDE_DIRS}
- ${CPPUNIT_INCLUDE_DIRS}
- ${GNURADIO_RUNTIME_INCLUDE_DIRS}
- ${GNURADIO_RUNTIME_INCLUDE_DIRS}/gnuradio/swig
-)
+find_package(Doxygen)
-link_directories(
- ${Boost_LIBRARY_DIRS}
- ${CPPUNIT_LIBRARY_DIRS}
- ${GNURADIO_RUNTIME_LIBRARY_DIRS}
-)
-
-# Set component parameters
-set(GR_AISTX_INCLUDE_DIRS ${CMAKE_CURRENT_SOURCE_DIR}/include CACHE INTERNAL "" FORCE)
-set(GR_AISTX_SWIG_INCLUDE_DIRS ${CMAKE_CURRENT_SOURCE_DIR}/swig CACHE INTERNAL "" FORCE)
+########################################################################
+# Setup doxygen option
+########################################################################
+if(DOXYGEN_FOUND)
+ option(ENABLE_DOXYGEN "Build docs using Doxygen" ON)
+else(DOXYGEN_FOUND)
+ option(ENABLE_DOXYGEN "Build docs using Doxygen" OFF)
+endif(DOXYGEN_FOUND)
########################################################################
# Create uninstall target
########################################################################
configure_file(
- ${CMAKE_SOURCE_DIR}/cmake/cmake_uninstall.cmake.in
+ ${PROJECT_SOURCE_DIR}/cmake/cmake_uninstall.cmake.in
${CMAKE_CURRENT_BINARY_DIR}/cmake_uninstall.cmake
@ONLY)
add_custom_target(uninstall
${CMAKE_COMMAND} -P ${CMAKE_CURRENT_BINARY_DIR}/cmake_uninstall.cmake
-)
+ )
########################################################################
# Add subdirectories
########################################################################
-add_subdirectory(include/AISTX)
+add_subdirectory(include/gnuradio/aistx)
add_subdirectory(lib)
-add_subdirectory(swig)
-add_subdirectory(python)
-add_subdirectory(grc)
add_subdirectory(apps)
add_subdirectory(docs)
+# NOTE: manually update below to use GRC to generate C++ flowgraphs w/o python
+if(ENABLE_PYTHON)
+ message(STATUS "PYTHON and GRC components are enabled")
+ add_subdirectory(python/aistx)
+ add_subdirectory(grc)
+else(ENABLE_PYTHON)
+ message(STATUS "PYTHON and GRC components are disabled")
+endif(ENABLE_PYTHON)
+
+########################################################################
+# Install cmake search helper for this library
+########################################################################
+
+install(FILES cmake/Modules/gnuradio-aistxConfig.cmake
+ DESTINATION ${GR_CMAKE_DIR}
+)
+
+include(CMakePackageConfigHelpers)
+configure_package_config_file(
+ ${PROJECT_SOURCE_DIR}/cmake/Modules/targetConfig.cmake.in
+ ${CMAKE_CURRENT_BINARY_DIR}/cmake/Modules/${target}Config.cmake
+ INSTALL_DESTINATION ${GR_CMAKE_DIR}
+)
diff --git a/gr-aistx/GPL b/gr-aistx/GPL
deleted file mode 100644
index 94a0453..0000000
--- a/gr-aistx/GPL
+++ /dev/null
@@ -1,621 +0,0 @@
- GNU GENERAL PUBLIC LICENSE
- Version 3, 29 June 2007
-
- Copyright (C) 2007 Free Software Foundation, Inc.
- Everyone is permitted to copy and distribute verbatim copies
- of this license document, but changing it is not allowed.
-
- Preamble
-
- The GNU General Public License is a free, copyleft license for
-software and other kinds of works.
-
- The licenses for most software and other practical works are designed
-to take away your freedom to share and change the works. By contrast,
-the GNU General Public License is intended to guarantee your freedom to
-share and change all versions of a program--to make sure it remains free
-software for all its users. We, the Free Software Foundation, use the
-GNU General Public License for most of our software; it applies also to
-any other work released this way by its authors. You can apply it to
-your programs, too.
-
- When we speak of free software, we are referring to freedom, not
-price. Our General Public Licenses are designed to make sure that you
-have the freedom to distribute copies of free software (and charge for
-them if you wish), that you receive source code or can get it if you
-want it, that you can change the software or use pieces of it in new
-free programs, and that you know you can do these things.
-
- To protect your rights, we need to prevent others from denying you
-these rights or asking you to surrender the rights. Therefore, you have
-certain responsibilities if you distribute copies of the software, or if
-you modify it: responsibilities to respect the freedom of others.
-
- For example, if you distribute copies of such a program, whether
-gratis or for a fee, you must pass on to the recipients the same
-freedoms that you received. You must make sure that they, too, receive
-or can get the source code. And you must show them these terms so they
-know their rights.
-
- Developers that use the GNU GPL protect your rights with two steps:
-(1) assert copyright on the software, and (2) offer you this License
-giving you legal permission to copy, distribute and/or modify it.
-
- For the developers' and authors' protection, the GPL clearly explains
-that there is no warranty for this free software. For both users' and
-authors' sake, the GPL requires that modified versions be marked as
-changed, so that their problems will not be attributed erroneously to
-authors of previous versions.
-
- Some devices are designed to deny users access to install or run
-modified versions of the software inside them, although the manufacturer
-can do so. This is fundamentally incompatible with the aim of
-protecting users' freedom to change the software. The systematic
-pattern of such abuse occurs in the area of products for individuals to
-use, which is precisely where it is most unacceptable. Therefore, we
-have designed this version of the GPL to prohibit the practice for those
-products. If such problems arise substantially in other domains, we
-stand ready to extend this provision to those domains in future versions
-of the GPL, as needed to protect the freedom of users.
-
- Finally, every program is threatened constantly by software patents.
-States should not allow patents to restrict development and use of
-software on general-purpose computers, but in those that do, we wish to
-avoid the special danger that patents applied to a free program could
-make it effectively proprietary. To prevent this, the GPL assures that
-patents cannot be used to render the program non-free.
-
- The precise terms and conditions for copying, distribution and
-modification follow.
-
- TERMS AND CONDITIONS
-
- 0. Definitions.
-
- "This License" refers to version 3 of the GNU General Public License.
-
- "Copyright" also means copyright-like laws that apply to other kinds of
-works, such as semiconductor masks.
-
- "The Program" refers to any copyrightable work licensed under this
-License. Each licensee is addressed as "you". "Licensees" and
-"recipients" may be individuals or organizations.
-
- To "modify" a work means to copy from or adapt all or part of the work
-in a fashion requiring copyright permission, other than the making of an
-exact copy. The resulting work is called a "modified version" of the
-earlier work or a work "based on" the earlier work.
-
- A "covered work" means either the unmodified Program or a work based
-on the Program.
-
- To "propagate" a work means to do anything with it that, without
-permission, would make you directly or secondarily liable for
-infringement under applicable copyright law, except executing it on a
-computer or modifying a private copy. Propagation includes copying,
-distribution (with or without modification), making available to the
-public, and in some countries other activities as well.
-
- To "convey" a work means any kind of propagation that enables other
-parties to make or receive copies. Mere interaction with a user through
-a computer network, with no transfer of a copy, is not conveying.
-
- An interactive user interface displays "Appropriate Legal Notices"
-to the extent that it includes a convenient and prominently visible
-feature that (1) displays an appropriate copyright notice, and (2)
-tells the user that there is no warranty for the work (except to the
-extent that warranties are provided), that licensees may convey the
-work under this License, and how to view a copy of this License. If
-the interface presents a list of user commands or options, such as a
-menu, a prominent item in the list meets this criterion.
-
- 1. Source Code.
-
- The "source code" for a work means the preferred form of the work
-for making modifications to it. "Object code" means any non-source
-form of a work.
-
- A "Standard Interface" means an interface that either is an official
-standard defined by a recognized standards body, or, in the case of
-interfaces specified for a particular programming language, one that
-is widely used among developers working in that language.
-
- The "System Libraries" of an executable work include anything, other
-than the work as a whole, that (a) is included in the normal form of
-packaging a Major Component, but which is not part of that Major
-Component, and (b) serves only to enable use of the work with that
-Major Component, or to implement a Standard Interface for which an
-implementation is available to the public in source code form. A
-"Major Component", in this context, means a major essential component
-(kernel, window system, and so on) of the specific operating system
-(if any) on which the executable work runs, or a compiler used to
-produce the work, or an object code interpreter used to run it.
-
- The "Corresponding Source" for a work in object code form means all
-the source code needed to generate, install, and (for an executable
-work) run the object code and to modify the work, including scripts to
-control those activities. However, it does not include the work's
-System Libraries, or general-purpose tools or generally available free
-programs which are used unmodified in performing those activities but
-which are not part of the work. For example, Corresponding Source
-includes interface definition files associated with source files for
-the work, and the source code for shared libraries and dynamically
-linked subprograms that the work is specifically designed to require,
-such as by intimate data communication or control flow between those
-subprograms and other parts of the work.
-
- The Corresponding Source need not include anything that users
-can regenerate automatically from other parts of the Corresponding
-Source.
-
- The Corresponding Source for a work in source code form is that
-same work.
-
- 2. Basic Permissions.
-
- All rights granted under this License are granted for the term of
-copyright on the Program, and are irrevocable provided the stated
-conditions are met. This License explicitly affirms your unlimited
-permission to run the unmodified Program. The output from running a
-covered work is covered by this License only if the output, given its
-content, constitutes a covered work. This License acknowledges your
-rights of fair use or other equivalent, as provided by copyright law.
-
- You may make, run and propagate covered works that you do not
-convey, without conditions so long as your license otherwise remains
-in force. You may convey covered works to others for the sole purpose
-of having them make modifications exclusively for you, or provide you
-with facilities for running those works, provided that you comply with
-the terms of this License in conveying all material for which you do
-not control copyright. Those thus making or running the covered works
-for you must do so exclusively on your behalf, under your direction
-and control, on terms that prohibit them from making any copies of
-your copyrighted material outside their relationship with you.
-
- Conveying under any other circumstances is permitted solely under
-the conditions stated below. Sublicensing is not allowed; section 10
-makes it unnecessary.
-
- 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
-
- No covered work shall be deemed part of an effective technological
-measure under any applicable law fulfilling obligations under article
-11 of the WIPO copyright treaty adopted on 20 December 1996, or
-similar laws prohibiting or restricting circumvention of such
-measures.
-
- When you convey a covered work, you waive any legal power to forbid
-circumvention of technological measures to the extent such circumvention
-is effected by exercising rights under this License with respect to
-the covered work, and you disclaim any intention to limit operation or
-modification of the work as a means of enforcing, against the work's
-users, your or third parties' legal rights to forbid circumvention of
-technological measures.
-
- 4. Conveying Verbatim Copies.
-
- You may convey verbatim copies of the Program's source code as you
-receive it, in any medium, provided that you conspicuously and
-appropriately publish on each copy an appropriate copyright notice;
-keep intact all notices stating that this License and any
-non-permissive terms added in accord with section 7 apply to the code;
-keep intact all notices of the absence of any warranty; and give all
-recipients a copy of this License along with the Program.
-
- You may charge any price or no price for each copy that you convey,
-and you may offer support or warranty protection for a fee.
-
- 5. Conveying Modified Source Versions.
-
- You may convey a work based on the Program, or the modifications to
-produce it from the Program, in the form of source code under the
-terms of section 4, provided that you also meet all of these conditions:
-
- a) The work must carry prominent notices stating that you modified
- it, and giving a relevant date.
-
- b) The work must carry prominent notices stating that it is
- released under this License and any conditions added under section
- 7. This requirement modifies the requirement in section 4 to
- "keep intact all notices".
-
- c) You must license the entire work, as a whole, under this
- License to anyone who comes into possession of a copy. This
- License will therefore apply, along with any applicable section 7
- additional terms, to the whole of the work, and all its parts,
- regardless of how they are packaged. This License gives no
- permission to license the work in any other way, but it does not
- invalidate such permission if you have separately received it.
-
- d) If the work has interactive user interfaces, each must display
- Appropriate Legal Notices; however, if the Program has interactive
- interfaces that do not display Appropriate Legal Notices, your
- work need not make them do so.
-
- A compilation of a covered work with other separate and independent
-works, which are not by their nature extensions of the covered work,
-and which are not combined with it such as to form a larger program,
-in or on a volume of a storage or distribution medium, is called an
-"aggregate" if the compilation and its resulting copyright are not
-used to limit the access or legal rights of the compilation's users
-beyond what the individual works permit. Inclusion of a covered work
-in an aggregate does not cause this License to apply to the other
-parts of the aggregate.
-
- 6. Conveying Non-Source Forms.
-
- You may convey a covered work in object code form under the terms
-of sections 4 and 5, provided that you also convey the
-machine-readable Corresponding Source under the terms of this License,
-in one of these ways:
-
- a) Convey the object code in, or embodied in, a physical product
- (including a physical distribution medium), accompanied by the
- Corresponding Source fixed on a durable physical medium
- customarily used for software interchange.
-
- b) Convey the object code in, or embodied in, a physical product
- (including a physical distribution medium), accompanied by a
- written offer, valid for at least three years and valid for as
- long as you offer spare parts or customer support for that product
- model, to give anyone who possesses the object code either (1) a
- copy of the Corresponding Source for all the software in the
- product that is covered by this License, on a durable physical
- medium customarily used for software interchange, for a price no
- more than your reasonable cost of physically performing this
- conveying of source, or (2) access to copy the
- Corresponding Source from a network server at no charge.
-
- c) Convey individual copies of the object code with a copy of the
- written offer to provide the Corresponding Source. This
- alternative is allowed only occasionally and noncommercially, and
- only if you received the object code with such an offer, in accord
- with subsection 6b.
-
- d) Convey the object code by offering access from a designated
- place (gratis or for a charge), and offer equivalent access to the
- Corresponding Source in the same way through the same place at no
- further charge. You need not require recipients to copy the
- Corresponding Source along with the object code. If the place to
- copy the object code is a network server, the Corresponding Source
- may be on a different server (operated by you or a third party)
- that supports equivalent copying facilities, provided you maintain
- clear directions next to the object code saying where to find the
- Corresponding Source. Regardless of what server hosts the
- Corresponding Source, you remain obligated to ensure that it is
- available for as long as needed to satisfy these requirements.
-
- e) Convey the object code using peer-to-peer transmission, provided
- you inform other peers where the object code and Corresponding
- Source of the work are being offered to the general public at no
- charge under subsection 6d.
-
- A separable portion of the object code, whose source code is excluded
-from the Corresponding Source as a System Library, need not be
-included in conveying the object code work.
-
- A "User Product" is either (1) a "consumer product", which means any
-tangible personal property which is normally used for personal, family,
-or household purposes, or (2) anything designed or sold for incorporation
-into a dwelling. In determining whether a product is a consumer product,
-doubtful cases shall be resolved in favor of coverage. For a particular
-product received by a particular user, "normally used" refers to a
-typical or common use of that class of product, regardless of the status
-of the particular user or of the way in which the particular user
-actually uses, or expects or is expected to use, the product. A product
-is a consumer product regardless of whether the product has substantial
-commercial, industrial or non-consumer uses, unless such uses represent
-the only significant mode of use of the product.
-
- "Installation Information" for a User Product means any methods,
-procedures, authorization keys, or other information required to install
-and execute modified versions of a covered work in that User Product from
-a modified version of its Corresponding Source. The information must
-suffice to ensure that the continued functioning of the modified object
-code is in no case prevented or interfered with solely because
-modification has been made.
-
- If you convey an object code work under this section in, or with, or
-specifically for use in, a User Product, and the conveying occurs as
-part of a transaction in which the right of possession and use of the
-User Product is transferred to the recipient in perpetuity or for a
-fixed term (regardless of how the transaction is characterized), the
-Corresponding Source conveyed under this section must be accompanied
-by the Installation Information. But this requirement does not apply
-if neither you nor any third party retains the ability to install
-modified object code on the User Product (for example, the work has
-been installed in ROM).
-
- The requirement to provide Installation Information does not include a
-requirement to continue to provide support service, warranty, or updates
-for a work that has been modified or installed by the recipient, or for
-the User Product in which it has been modified or installed. Access to a
-network may be denied when the modification itself materially and
-adversely affects the operation of the network or violates the rules and
-protocols for communication across the network.
-
- Corresponding Source conveyed, and Installation Information provided,
-in accord with this section must be in a format that is publicly
-documented (and with an implementation available to the public in
-source code form), and must require no special password or key for
-unpacking, reading or copying.
-
- 7. Additional Terms.
-
- "Additional permissions" are terms that supplement the terms of this
-License by making exceptions from one or more of its conditions.
-Additional permissions that are applicable to the entire Program shall
-be treated as though they were included in this License, to the extent
-that they are valid under applicable law. If additional permissions
-apply only to part of the Program, that part may be used separately
-under those permissions, but the entire Program remains governed by
-this License without regard to the additional permissions.
-
- When you convey a copy of a covered work, you may at your option
-remove any additional permissions from that copy, or from any part of
-it. (Additional permissions may be written to require their own
-removal in certain cases when you modify the work.) You may place
-additional permissions on material, added by you to a covered work,
-for which you have or can give appropriate copyright permission.
-
- Notwithstanding any other provision of this License, for material you
-add to a covered work, you may (if authorized by the copyright holders of
-that material) supplement the terms of this License with terms:
-
- a) Disclaiming warranty or limiting liability differently from the
- terms of sections 15 and 16 of this License; or
-
- b) Requiring preservation of specified reasonable legal notices or
- author attributions in that material or in the Appropriate Legal
- Notices displayed by works containing it; or
-
- c) Prohibiting misrepresentation of the origin of that material, or
- requiring that modified versions of such material be marked in
- reasonable ways as different from the original version; or
-
- d) Limiting the use for publicity purposes of names of licensors or
- authors of the material; or
-
- e) Declining to grant rights under trademark law for use of some
- trade names, trademarks, or service marks; or
-
- f) Requiring indemnification of licensors and authors of that
- material by anyone who conveys the material (or modified versions of
- it) with contractual assumptions of liability to the recipient, for
- any liability that these contractual assumptions directly impose on
- those licensors and authors.
-
- All other non-permissive additional terms are considered "further
-restrictions" within the meaning of section 10. If the Program as you
-received it, or any part of it, contains a notice stating that it is
-governed by this License along with a term that is a further
-restriction, you may remove that term. If a license document contains
-a further restriction but permits relicensing or conveying under this
-License, you may add to a covered work material governed by the terms
-of that license document, provided that the further restriction does
-not survive such relicensing or conveying.
-
- If you add terms to a covered work in accord with this section, you
-must place, in the relevant source files, a statement of the
-additional terms that apply to those files, or a notice indicating
-where to find the applicable terms.
-
- Additional terms, permissive or non-permissive, may be stated in the
-form of a separately written license, or stated as exceptions;
-the above requirements apply either way.
-
- 8. Termination.
-
- You may not propagate or modify a covered work except as expressly
-provided under this License. Any attempt otherwise to propagate or
-modify it is void, and will automatically terminate your rights under
-this License (including any patent licenses granted under the third
-paragraph of section 11).
-
- However, if you cease all violation of this License, then your
-license from a particular copyright holder is reinstated (a)
-provisionally, unless and until the copyright holder explicitly and
-finally terminates your license, and (b) permanently, if the copyright
-holder fails to notify you of the violation by some reasonable means
-prior to 60 days after the cessation.
-
- Moreover, your license from a particular copyright holder is
-reinstated permanently if the copyright holder notifies you of the
-violation by some reasonable means, this is the first time you have
-received notice of violation of this License (for any work) from that
-copyright holder, and you cure the violation prior to 30 days after
-your receipt of the notice.
-
- Termination of your rights under this section does not terminate the
-licenses of parties who have received copies or rights from you under
-this License. If your rights have been terminated and not permanently
-reinstated, you do not qualify to receive new licenses for the same
-material under section 10.
-
- 9. Acceptance Not Required for Having Copies.
-
- You are not required to accept this License in order to receive or
-run a copy of the Program. Ancillary propagation of a covered work
-occurring solely as a consequence of using peer-to-peer transmission
-to receive a copy likewise does not require acceptance. However,
-nothing other than this License grants you permission to propagate or
-modify any covered work. These actions infringe copyright if you do
-not accept this License. Therefore, by modifying or propagating a
-covered work, you indicate your acceptance of this License to do so.
-
- 10. Automatic Licensing of Downstream Recipients.
-
- Each time you convey a covered work, the recipient automatically
-receives a license from the original licensors, to run, modify and
-propagate that work, subject to this License. You are not responsible
-for enforcing compliance by third parties with this License.
-
- An "entity transaction" is a transaction transferring control of an
-organization, or substantially all assets of one, or subdividing an
-organization, or merging organizations. If propagation of a covered
-work results from an entity transaction, each party to that
-transaction who receives a copy of the work also receives whatever
-licenses to the work the party's predecessor in interest had or could
-give under the previous paragraph, plus a right to possession of the
-Corresponding Source of the work from the predecessor in interest, if
-the predecessor has it or can get it with reasonable efforts.
-
- You may not impose any further restrictions on the exercise of the
-rights granted or affirmed under this License. For example, you may
-not impose a license fee, royalty, or other charge for exercise of
-rights granted under this License, and you may not initiate litigation
-(including a cross-claim or counterclaim in a lawsuit) alleging that
-any patent claim is infringed by making, using, selling, offering for
-sale, or importing the Program or any portion of it.
-
- 11. Patents.
-
- A "contributor" is a copyright holder who authorizes use under this
-License of the Program or a work on which the Program is based. The
-work thus licensed is called the contributor's "contributor version".
-
- A contributor's "essential patent claims" are all patent claims
-owned or controlled by the contributor, whether already acquired or
-hereafter acquired, that would be infringed by some manner, permitted
-by this License, of making, using, or selling its contributor version,
-but do not include claims that would be infringed only as a
-consequence of further modification of the contributor version. For
-purposes of this definition, "control" includes the right to grant
-patent sublicenses in a manner consistent with the requirements of
-this License.
-
- Each contributor grants you a non-exclusive, worldwide, royalty-free
-patent license under the contributor's essential patent claims, to
-make, use, sell, offer for sale, import and otherwise run, modify and
-propagate the contents of its contributor version.
-
- In the following three paragraphs, a "patent license" is any express
-agreement or commitment, however denominated, not to enforce a patent
-(such as an express permission to practice a patent or covenant not to
-sue for patent infringement). To "grant" such a patent license to a
-party means to make such an agreement or commitment not to enforce a
-patent against the party.
-
- If you convey a covered work, knowingly relying on a patent license,
-and the Corresponding Source of the work is not available for anyone
-to copy, free of charge and under the terms of this License, through a
-publicly available network server or other readily accessible means,
-then you must either (1) cause the Corresponding Source to be so
-available, or (2) arrange to deprive yourself of the benefit of the
-patent license for this particular work, or (3) arrange, in a manner
-consistent with the requirements of this License, to extend the patent
-license to downstream recipients. "Knowingly relying" means you have
-actual knowledge that, but for the patent license, your conveying the
-covered work in a country, or your recipient's use of the covered work
-in a country, would infringe one or more identifiable patents in that
-country that you have reason to believe are valid.
-
- If, pursuant to or in connection with a single transaction or
-arrangement, you convey, or propagate by procuring conveyance of, a
-covered work, and grant a patent license to some of the parties
-receiving the covered work authorizing them to use, propagate, modify
-or convey a specific copy of the covered work, then the patent license
-you grant is automatically extended to all recipients of the covered
-work and works based on it.
-
- A patent license is "discriminatory" if it does not include within
-the scope of its coverage, prohibits the exercise of, or is
-conditioned on the non-exercise of one or more of the rights that are
-specifically granted under this License. You may not convey a covered
-work if you are a party to an arrangement with a third party that is
-in the business of distributing software, under which you make payment
-to the third party based on the extent of your activity of conveying
-the work, and under which the third party grants, to any of the
-parties who would receive the covered work from you, a discriminatory
-patent license (a) in connection with copies of the covered work
-conveyed by you (or copies made from those copies), or (b) primarily
-for and in connection with specific products or compilations that
-contain the covered work, unless you entered into that arrangement,
-or that patent license was granted, prior to 28 March 2007.
-
- Nothing in this License shall be construed as excluding or limiting
-any implied license or other defenses to infringement that may
-otherwise be available to you under applicable patent law.
-
- 12. No Surrender of Others' Freedom.
-
- If conditions are imposed on you (whether by court order, agreement or
-otherwise) that contradict the conditions of this License, they do not
-excuse you from the conditions of this License. If you cannot convey a
-covered work so as to satisfy simultaneously your obligations under this
-License and any other pertinent obligations, then as a consequence you may
-not convey it at all. For example, if you agree to terms that obligate you
-to collect a royalty for further conveying from those to whom you convey
-the Program, the only way you could satisfy both those terms and this
-License would be to refrain entirely from conveying the Program.
-
- 13. Use with the GNU Affero General Public License.
-
- Notwithstanding any other provision of this License, you have
-permission to link or combine any covered work with a work licensed
-under version 3 of the GNU Affero General Public License into a single
-combined work, and to convey the resulting work. The terms of this
-License will continue to apply to the part which is the covered work,
-but the special requirements of the GNU Affero General Public License,
-section 13, concerning interaction through a network will apply to the
-combination as such.
-
- 14. Revised Versions of this License.
-
- The Free Software Foundation may publish revised and/or new versions of
-the GNU General Public License from time to time. Such new versions will
-be similar in spirit to the present version, but may differ in detail to
-address new problems or concerns.
-
- Each version is given a distinguishing version number. If the
-Program specifies that a certain numbered version of the GNU General
-Public License "or any later version" applies to it, you have the
-option of following the terms and conditions either of that numbered
-version or of any later version published by the Free Software
-Foundation. If the Program does not specify a version number of the
-GNU General Public License, you may choose any version ever published
-by the Free Software Foundation.
-
- If the Program specifies that a proxy can decide which future
-versions of the GNU General Public License can be used, that proxy's
-public statement of acceptance of a version permanently authorizes you
-to choose that version for the Program.
-
- Later license versions may give you additional or different
-permissions. However, no additional obligations are imposed on any
-author or copyright holder as a result of your choosing to follow a
-later version.
-
- 15. Disclaimer of Warranty.
-
- THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
-APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
-HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
-OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
-THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
-IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
-ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
-
- 16. Limitation of Liability.
-
- IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
-WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
-THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
-GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
-USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
-DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
-PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
-EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
-SUCH DAMAGES.
-
- 17. Interpretation of Sections 15 and 16.
-
- If the disclaimer of warranty and limitation of liability provided
-above cannot be given local legal effect according to their terms,
-reviewing courts shall apply local law that most closely approximates
-an absolute waiver of all civil liability in connection with the
-Program, unless a warranty or assumption of liability accompanies a
-copy of the Program in return for a fee.
-
- END OF TERMS AND CONDITIONS
diff --git a/gr-aistx/MANIFEST.md b/gr-aistx/MANIFEST.md
new file mode 100644
index 0000000..2faf06d
--- /dev/null
+++ b/gr-aistx/MANIFEST.md
@@ -0,0 +1,17 @@
+title: The AISTX OOT Module
+brief: AIS Frame Builder for GNU Radio, part of the AIS BlackToolkit
+tags: # Tags are arbitrary; see what other authors on CGRAN are using
+ - sdr
+author:
+  - Embyte & Pastus
+copyright_owner:
+  - Embyte & Pastus
+license:
+gr_supported_version: # Put a comma separated list of supported GR versions here
+#repo: # Put the URL of the repository here, or leave blank for default
+#website: # If you have a separate project website, put it here
+#icon: # Put a URL to a square image here that will be used as an icon on CGRAN
+---
+gr-aistx provides the AIS Frame Builder, a custom GNU Radio block that generates AIS
+frames and implements the full AIS stack: the application/presentation layers, the
+link layer, and the physical layer, as defined in the AIS protocol specification.
diff --git a/gr-aistx/README b/gr-aistx/README
deleted file mode 100644
index 31fb5ec..0000000
--- a/gr-aistx/README
+++ /dev/null
@@ -1,24 +0,0 @@
-
-This directory contains a custom block for GNU Radio that we call the AIS Frame Builder.
-It is part of the AIS BlackToolkit.
-
-This block serves as a generator of AIS frames and implements the full AIS stack.
-It is composed of three main components covering, respectively, the
-application/presentation layers, the link layer, and the physical layer,
-as defined in the protocol specification for AIS.
-
-Install as described in the official out-of-tree documentation, i.e.:
-
-$ mkdir build
-$ cd build
-$ cmake ../
-$ make
-$ sudo make install
-
-Copyright 2013-2014 -- Embyte & Pastus
-
-This program is free software; you can redistribute it and/or
-modify it under the terms of the GNU General Public License
-as published by the Free Software Foundation; either version 2
-of the License, or (at your option) any later version.
-
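After the mkdir/cmake/make/make install steps shown in the removed README, a quick sanity
check is to import the module from Python. This is only a sketch: it assumes the out-of-tree
module installs a Python package named "aistx" (the usual gr_modtool naming for a module
called gr-aistx) and that the install prefix's site-packages directory is on PYTHONPATH;
adjust the package name if your build differs.

$ python -c "import aistx; print(dir(aistx))"

If the import fails, add the install prefix's site-packages directory to PYTHONPATH and run
sudo ldconfig so the newly installed shared library can be found.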
diff --git a/gr-aistx/apps/CMakeLists.txt b/gr-aistx/apps/CMakeLists.txt
index c837d77..5f6cb9a 100644
--- a/gr-aistx/apps/CMakeLists.txt
+++ b/gr-aistx/apps/CMakeLists.txt
@@ -1,21 +1,10 @@
# Copyright 2011 Free Software Foundation, Inc.
#
-# This file is part of GNU Radio
+# This file was generated by gr_modtool, a tool from the GNU Radio framework
+# This file is a part of gr-aistx
#
-# GNU Radio is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 3, or (at your option)
-# any later version.
+# SPDX-License-Identifier: GPL-3.0-or-later
#
-# GNU Radio is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with GNU Radio; see the file COPYING. If not, write to
-# the Free Software Foundation, Inc., 51 Franklin Street,
-# Boston, MA 02110-1301, USA.
include(GrPython)
diff --git a/gr-aistx/cmake/Modules/CMakeParseArgumentsCopy.cmake b/gr-aistx/cmake/Modules/CMakeParseArgumentsCopy.cmake
index 7ce4c49..66016cb 100644
--- a/gr-aistx/cmake/Modules/CMakeParseArgumentsCopy.cmake
+++ b/gr-aistx/cmake/Modules/CMakeParseArgumentsCopy.cmake
@@ -58,7 +58,7 @@
# the new option.
# E.g. my_install(TARGETS foo DESTINATION OPTIONAL) would result in
# MY_INSTALL_DESTINATION set to "OPTIONAL", but MY_INSTALL_DESTINATION would
-# be empty and MY_INSTALL_OPTIONAL would be set to TRUE therefor.
+# be empty and MY_INSTALL_OPTIONAL would be set to TRUE therefore.
#=============================================================================
# Copyright 2010 Alexander Neundorf
diff --git a/gr-aistx/cmake/Modules/FindCppUnit.cmake b/gr-aistx/cmake/Modules/FindCppUnit.cmake
deleted file mode 100644
index 9af308f..0000000
--- a/gr-aistx/cmake/Modules/FindCppUnit.cmake
+++ /dev/null
@@ -1,36 +0,0 @@
-# http://www.cmake.org/pipermail/cmake/2006-October/011446.html
-# Modified to use pkg config and use standard var names
-
-#
-# Find the CppUnit includes and library
-#
-# This module defines
-# CPPUNIT_INCLUDE_DIR, where to find tiff.h, etc.
-# CPPUNIT_LIBRARIES, the libraries to link against to use CppUnit.
-# CPPUNIT_FOUND, If false, do not try to use CppUnit.
-
-INCLUDE(FindPkgConfig)
-PKG_CHECK_MODULES(PC_CPPUNIT "cppunit")
-
-FIND_PATH(CPPUNIT_INCLUDE_DIRS
- NAMES cppunit/TestCase.h
- HINTS ${PC_CPPUNIT_INCLUDE_DIR}
- PATHS
- /usr/local/include
- /usr/include
-)
-
-FIND_LIBRARY(CPPUNIT_LIBRARIES
- NAMES cppunit
- HINTS ${PC_CPPUNIT_LIBDIR}
- PATHS
- ${CPPUNIT_INCLUDE_DIRS}/../lib
- /usr/local/lib
- /usr/lib
-)
-
-LIST(APPEND CPPUNIT_LIBRARIES ${CMAKE_DL_LIBS})
-
-INCLUDE(FindPackageHandleStandardArgs)
-FIND_PACKAGE_HANDLE_STANDARD_ARGS(CPPUNIT DEFAULT_MSG CPPUNIT_LIBRARIES CPPUNIT_INCLUDE_DIRS)
-MARK_AS_ADVANCED(CPPUNIT_LIBRARIES CPPUNIT_INCLUDE_DIRS)
diff --git a/gr-aistx/cmake/Modules/FindGnuradioRuntime.cmake b/gr-aistx/cmake/Modules/FindGnuradioRuntime.cmake
deleted file mode 100644
index 2833fb1..0000000
--- a/gr-aistx/cmake/Modules/FindGnuradioRuntime.cmake
+++ /dev/null
@@ -1,35 +0,0 @@
-INCLUDE(FindPkgConfig)
-PKG_CHECK_MODULES(PC_GNURADIO_RUNTIME gnuradio-runtime)
-
-if(PC_GNURADIO_RUNTIME_FOUND)
- # look for include files
- FIND_PATH(
- GNURADIO_RUNTIME_INCLUDE_DIRS
- NAMES gnuradio/top_block.h
- HINTS $ENV{GNURADIO_RUNTIME_DIR}/include
- ${PC_GNURADIO_RUNTIME_INCLUDE_DIRS}
- ${CMAKE_INSTALL_PREFIX}/include
- PATHS /usr/local/include
- /usr/include
- )
-
- # look for libs
- FIND_LIBRARY(
- GNURADIO_RUNTIME_LIBRARIES
- NAMES gnuradio-runtime
- HINTS $ENV{GNURADIO_RUNTIME_DIR}/lib
- ${PC_GNURADIO_RUNTIME_LIBDIR}
- ${CMAKE_INSTALL_PREFIX}/lib/
- ${CMAKE_INSTALL_PREFIX}/lib64/
- PATHS /usr/local/lib
- /usr/local/lib64
- /usr/lib
- /usr/lib64
- )
-
- set(GNURADIO_RUNTIME_FOUND ${PC_GNURADIO_RUNTIME_FOUND})
-endif(PC_GNURADIO_RUNTIME_FOUND)
-
-INCLUDE(FindPackageHandleStandardArgs)
-FIND_PACKAGE_HANDLE_STANDARD_ARGS(GNURADIO_RUNTIME DEFAULT_MSG GNURADIO_RUNTIME_LIBRARIES GNURADIO_RUNTIME_INCLUDE_DIRS)
-MARK_AS_ADVANCED(GNURADIO_RUNTIME_LIBRARIES GNURADIO_RUNTIME_INCLUDE_DIRS)
diff --git a/gr-aistx/cmake/Modules/GrMiscUtils.cmake b/gr-aistx/cmake/Modules/GrMiscUtils.cmake
deleted file mode 100644
index 9331d5d..0000000
--- a/gr-aistx/cmake/Modules/GrMiscUtils.cmake
+++ /dev/null
@@ -1,210 +0,0 @@
-# Copyright 2010-2011 Free Software Foundation, Inc.
-#
-# This file is part of GNU Radio
-#
-# GNU Radio is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 3, or (at your option)
-# any later version.
-#
-# GNU Radio is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with GNU Radio; see the file COPYING. If not, write to
-# the Free Software Foundation, Inc., 51 Franklin Street,
-# Boston, MA 02110-1301, USA.
-
-if(DEFINED __INCLUDED_GR_MISC_UTILS_CMAKE)
- return()
-endif()
-set(__INCLUDED_GR_MISC_UTILS_CMAKE TRUE)
-
-########################################################################
-# Set global variable macro.
-# Used for subdirectories to export settings.
-# Example: include and library paths.
-########################################################################
-function(GR_SET_GLOBAL var)
- set(${var} ${ARGN} CACHE INTERNAL "" FORCE)
-endfunction(GR_SET_GLOBAL)
-
-########################################################################
-# Set the pre-processor definition if the condition is true.
-# - def the pre-processor definition to set and condition name
-########################################################################
-function(GR_ADD_COND_DEF def)
- if(${def})
- add_definitions(-D${def})
- endif(${def})
-endfunction(GR_ADD_COND_DEF)
-
-########################################################################
-# Check for a header and conditionally set a compile define.
-# - hdr the relative path to the header file
-# - def the pre-processor definition to set
-########################################################################
-function(GR_CHECK_HDR_N_DEF hdr def)
- include(CheckIncludeFileCXX)
- CHECK_INCLUDE_FILE_CXX(${hdr} ${def})
- GR_ADD_COND_DEF(${def})
-endfunction(GR_CHECK_HDR_N_DEF)
-
-########################################################################
-# Include subdirectory macro.
-# Sets the CMake directory variables,
-# includes the subdirectory CMakeLists.txt,
-# resets the CMake directory variables.
-#
-# This macro includes subdirectories rather than adding them
-# so that the subdirectory can affect variables in the level above.
-# This provides a work-around for the lack of convenience libraries.
-# This way a subdirectory can append to the list of library sources.
-########################################################################
-macro(GR_INCLUDE_SUBDIRECTORY subdir)
- #insert the current directories on the front of the list
- list(INSERT _cmake_source_dirs 0 ${CMAKE_CURRENT_SOURCE_DIR})
- list(INSERT _cmake_binary_dirs 0 ${CMAKE_CURRENT_BINARY_DIR})
-
- #set the current directories to the names of the subdirs
- set(CMAKE_CURRENT_SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/${subdir})
- set(CMAKE_CURRENT_BINARY_DIR ${CMAKE_CURRENT_BINARY_DIR}/${subdir})
-
- #include the subdirectory CMakeLists to run it
- file(MAKE_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR})
- include(${CMAKE_CURRENT_SOURCE_DIR}/CMakeLists.txt)
-
- #reset the value of the current directories
- list(GET _cmake_source_dirs 0 CMAKE_CURRENT_SOURCE_DIR)
- list(GET _cmake_binary_dirs 0 CMAKE_CURRENT_BINARY_DIR)
-
- #pop the subdir names of the front of the list
- list(REMOVE_AT _cmake_source_dirs 0)
- list(REMOVE_AT _cmake_binary_dirs 0)
-endmacro(GR_INCLUDE_SUBDIRECTORY)
-
-########################################################################
-# Check if a compiler flag works and conditionally set a compile define.
-# - flag the compiler flag to check for
-# - have the variable to set with result
-########################################################################
-macro(GR_ADD_CXX_COMPILER_FLAG_IF_AVAILABLE flag have)
- include(CheckCXXCompilerFlag)
- CHECK_CXX_COMPILER_FLAG(${flag} ${have})
- if(${have})
- add_definitions(${flag})
- endif(${have})
-endmacro(GR_ADD_CXX_COMPILER_FLAG_IF_AVAILABLE)
-
-########################################################################
-# Generates the .la libtool file
-# This appears to generate libtool files that cannot be used by auto*.
-# Usage GR_LIBTOOL(TARGET [target] DESTINATION [dest])
-# Notice: there is not COMPONENT option, these will not get distributed.
-########################################################################
-function(GR_LIBTOOL)
- if(NOT DEFINED GENERATE_LIBTOOL)
- set(GENERATE_LIBTOOL OFF) #disabled by default
- endif()
-
- if(GENERATE_LIBTOOL)
- include(CMakeParseArgumentsCopy)
- CMAKE_PARSE_ARGUMENTS(GR_LIBTOOL "" "TARGET;DESTINATION" "" ${ARGN})
-
- find_program(LIBTOOL libtool)
- if(LIBTOOL)
- include(CMakeMacroLibtoolFile)
- CREATE_LIBTOOL_FILE(${GR_LIBTOOL_TARGET} /${GR_LIBTOOL_DESTINATION})
- endif(LIBTOOL)
- endif(GENERATE_LIBTOOL)
-
-endfunction(GR_LIBTOOL)
-
-########################################################################
-# Do standard things to the library target
-# - set target properties
-# - make install rules
-# Also handle gnuradio custom naming conventions w/ extras mode.
-########################################################################
-function(GR_LIBRARY_FOO target)
- #parse the arguments for component names
- include(CMakeParseArgumentsCopy)
- CMAKE_PARSE_ARGUMENTS(GR_LIBRARY "" "RUNTIME_COMPONENT;DEVEL_COMPONENT" "" ${ARGN})
-
- #set additional target properties
- set_target_properties(${target} PROPERTIES SOVERSION ${LIBVER})
-
- #install the generated files like so...
- install(TARGETS ${target}
- LIBRARY DESTINATION ${GR_LIBRARY_DIR} COMPONENT ${GR_LIBRARY_RUNTIME_COMPONENT} # .so/.dylib file
- ARCHIVE DESTINATION ${GR_LIBRARY_DIR} COMPONENT ${GR_LIBRARY_DEVEL_COMPONENT} # .lib file
- RUNTIME DESTINATION ${GR_RUNTIME_DIR} COMPONENT ${GR_LIBRARY_RUNTIME_COMPONENT} # .dll file
- )
-
- #extras mode enabled automatically on linux
- if(NOT DEFINED LIBRARY_EXTRAS)
- set(LIBRARY_EXTRAS ${LINUX})
- endif()
-
- #special extras mode to enable alternative naming conventions
- if(LIBRARY_EXTRAS)
-
- #create .la file before changing props
- GR_LIBTOOL(TARGET ${target} DESTINATION ${GR_LIBRARY_DIR})
-
- #give the library a special name with ultra-zero soversion
- set_target_properties(${target} PROPERTIES LIBRARY_OUTPUT_NAME ${target}-${LIBVER} SOVERSION "0.0.0")
- set(target_name lib${target}-${LIBVER}.so.0.0.0)
-
- #custom command to generate symlinks
- add_custom_command(
- TARGET ${target}
- POST_BUILD
- COMMAND ${CMAKE_COMMAND} -E create_symlink ${target_name} ${CMAKE_CURRENT_BINARY_DIR}/lib${target}.so
- COMMAND ${CMAKE_COMMAND} -E create_symlink ${target_name} ${CMAKE_CURRENT_BINARY_DIR}/lib${target}-${LIBVER}.so.0
- COMMAND ${CMAKE_COMMAND} -E touch ${target_name} #so the symlinks point to something valid so cmake 2.6 will install
- )
-
- #and install the extra symlinks
- install(
- FILES
- ${CMAKE_CURRENT_BINARY_DIR}/lib${target}.so
- ${CMAKE_CURRENT_BINARY_DIR}/lib${target}-${LIBVER}.so.0
- DESTINATION ${GR_LIBRARY_DIR} COMPONENT ${GR_LIBRARY_RUNTIME_COMPONENT}
- )
-
- endif(LIBRARY_EXTRAS)
-endfunction(GR_LIBRARY_FOO)
-
-########################################################################
-# Create a dummy custom command that depends on other targets.
-# Usage:
-# GR_GEN_TARGET_DEPS(unique_name target_deps ...)
-# ADD_CUSTOM_COMMAND( ${target_deps})
-#
-# Custom command cant depend on targets, but can depend on executables,
-# and executables can depend on targets. So this is the process:
-########################################################################
-function(GR_GEN_TARGET_DEPS name var)
- file(
- WRITE ${CMAKE_CURRENT_BINARY_DIR}/${name}.cpp.in
- "int main(void){return 0;}\n"
- )
- execute_process(
- COMMAND ${CMAKE_COMMAND} -E copy_if_different
- ${CMAKE_CURRENT_BINARY_DIR}/${name}.cpp.in
- ${CMAKE_CURRENT_BINARY_DIR}/${name}.cpp
- )
- add_executable(${name} ${CMAKE_CURRENT_BINARY_DIR}/${name}.cpp)
- if(ARGN)
- add_dependencies(${name} ${ARGN})
- endif(ARGN)
-
- if(CMAKE_CROSSCOMPILING)
- set(${var} "DEPENDS;${name}" PARENT_SCOPE) #cant call command when cross
- else()
- set(${var} "DEPENDS;${name};COMMAND;${name}" PARENT_SCOPE)
- endif()
-endfunction(GR_GEN_TARGET_DEPS)
diff --git a/gr-aistx/cmake/Modules/GrPlatform.cmake b/gr-aistx/cmake/Modules/GrPlatform.cmake
deleted file mode 100644
index a2e4f3b..0000000
--- a/gr-aistx/cmake/Modules/GrPlatform.cmake
+++ /dev/null
@@ -1,46 +0,0 @@
-# Copyright 2011 Free Software Foundation, Inc.
-#
-# This file is part of GNU Radio
-#
-# GNU Radio is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 3, or (at your option)
-# any later version.
-#
-# GNU Radio is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with GNU Radio; see the file COPYING. If not, write to
-# the Free Software Foundation, Inc., 51 Franklin Street,
-# Boston, MA 02110-1301, USA.
-
-if(DEFINED __INCLUDED_GR_PLATFORM_CMAKE)
- return()
-endif()
-set(__INCLUDED_GR_PLATFORM_CMAKE TRUE)
-
-########################################################################
-# Setup additional defines for OS types
-########################################################################
-if(CMAKE_SYSTEM_NAME STREQUAL "Linux")
- set(LINUX TRUE)
-endif()
-
-if(LINUX AND EXISTS "/etc/debian_version")
- set(DEBIAN TRUE)
-endif()
-
-if(LINUX AND EXISTS "/etc/redhat-release")
- set(REDHAT TRUE)
-endif()
-
-########################################################################
-# when the library suffix should be 64 (applies to redhat linux family)
-########################################################################
-if(NOT DEFINED LIB_SUFFIX AND REDHAT AND CMAKE_SYSTEM_PROCESSOR MATCHES "64$")
- set(LIB_SUFFIX 64)
-endif()
-set(LIB_SUFFIX ${LIB_SUFFIX} CACHE STRING "lib directory suffix")
diff --git a/gr-aistx/cmake/Modules/GrPython.cmake b/gr-aistx/cmake/Modules/GrPython.cmake
deleted file mode 100644
index efdddf3..0000000
--- a/gr-aistx/cmake/Modules/GrPython.cmake
+++ /dev/null
@@ -1,227 +0,0 @@
-# Copyright 2010-2011 Free Software Foundation, Inc.
-#
-# This file is part of GNU Radio
-#
-# GNU Radio is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 3, or (at your option)
-# any later version.
-#
-# GNU Radio is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with GNU Radio; see the file COPYING. If not, write to
-# the Free Software Foundation, Inc., 51 Franklin Street,
-# Boston, MA 02110-1301, USA.
-
-if(DEFINED __INCLUDED_GR_PYTHON_CMAKE)
- return()
-endif()
-set(__INCLUDED_GR_PYTHON_CMAKE TRUE)
-
-########################################################################
-# Setup the python interpreter:
-# This allows the user to specify a specific interpreter,
-# or finds the interpreter via the built-in cmake module.
-########################################################################
-#this allows the user to override PYTHON_EXECUTABLE
-if(PYTHON_EXECUTABLE)
-
- set(PYTHONINTERP_FOUND TRUE)
-
-#otherwise if not set, try to automatically find it
-else(PYTHON_EXECUTABLE)
-
- #use the built-in find script
- find_package(PythonInterp)
-
- #and if that fails use the find program routine
- if(NOT PYTHONINTERP_FOUND)
- find_program(PYTHON_EXECUTABLE NAMES python python2.7 python2.6 python2.5)
- if(PYTHON_EXECUTABLE)
- set(PYTHONINTERP_FOUND TRUE)
- endif(PYTHON_EXECUTABLE)
- endif(NOT PYTHONINTERP_FOUND)
-
-endif(PYTHON_EXECUTABLE)
-
-#make the path to the executable appear in the cmake gui
-set(PYTHON_EXECUTABLE ${PYTHON_EXECUTABLE} CACHE FILEPATH "python interpreter")
-
-#make sure we can use -B with python (introduced in 2.6)
-if(PYTHON_EXECUTABLE)
- execute_process(
- COMMAND ${PYTHON_EXECUTABLE} -B -c ""
- OUTPUT_QUIET ERROR_QUIET
- RESULT_VARIABLE PYTHON_HAS_DASH_B_RESULT
- )
- if(PYTHON_HAS_DASH_B_RESULT EQUAL 0)
- set(PYTHON_DASH_B "-B")
- endif()
-endif(PYTHON_EXECUTABLE)
-
-########################################################################
-# Check for the existence of a python module:
-# - desc a string description of the check
-# - mod the name of the module to import
-# - cmd an additional command to run
-# - have the result variable to set
-########################################################################
-macro(GR_PYTHON_CHECK_MODULE desc mod cmd have)
- message(STATUS "")
- message(STATUS "Python checking for ${desc}")
- execute_process(
- COMMAND ${PYTHON_EXECUTABLE} -c "
-#########################################
-try: import ${mod}
-except: exit(-1)
-try: assert ${cmd}
-except: exit(-1)
-#########################################"
- RESULT_VARIABLE ${have}
- )
- if(${have} EQUAL 0)
- message(STATUS "Python checking for ${desc} - found")
- set(${have} TRUE)
- else(${have} EQUAL 0)
- message(STATUS "Python checking for ${desc} - not found")
- set(${have} FALSE)
- endif(${have} EQUAL 0)
-endmacro(GR_PYTHON_CHECK_MODULE)
-
-########################################################################
-# Sets the python installation directory GR_PYTHON_DIR
-########################################################################
-execute_process(COMMAND ${PYTHON_EXECUTABLE} -c "
-from distutils import sysconfig
-print sysconfig.get_python_lib(plat_specific=True, prefix='')
-" OUTPUT_VARIABLE GR_PYTHON_DIR OUTPUT_STRIP_TRAILING_WHITESPACE
-)
-file(TO_CMAKE_PATH ${GR_PYTHON_DIR} GR_PYTHON_DIR)
-
-########################################################################
-# Create an always-built target with a unique name
-# Usage: GR_UNIQUE_TARGET()
-########################################################################
-function(GR_UNIQUE_TARGET desc)
- file(RELATIVE_PATH reldir ${CMAKE_BINARY_DIR} ${CMAKE_CURRENT_BINARY_DIR})
- execute_process(COMMAND ${PYTHON_EXECUTABLE} -c "import re, hashlib
-unique = hashlib.md5('${reldir}${ARGN}').hexdigest()[:5]
-print(re.sub('\\W', '_', '${desc} ${reldir} ' + unique))"
- OUTPUT_VARIABLE _target OUTPUT_STRIP_TRAILING_WHITESPACE)
- add_custom_target(${_target} ALL DEPENDS ${ARGN})
-endfunction(GR_UNIQUE_TARGET)
-
-########################################################################
-# Install python sources (also builds and installs byte-compiled python)
-########################################################################
-function(GR_PYTHON_INSTALL)
- include(CMakeParseArgumentsCopy)
- CMAKE_PARSE_ARGUMENTS(GR_PYTHON_INSTALL "" "DESTINATION;COMPONENT" "FILES;PROGRAMS" ${ARGN})
-
- ####################################################################
- if(GR_PYTHON_INSTALL_FILES)
- ####################################################################
- install(${ARGN}) #installs regular python files
-
- #create a list of all generated files
- unset(pysrcfiles)
- unset(pycfiles)
- unset(pyofiles)
- foreach(pyfile ${GR_PYTHON_INSTALL_FILES})
- get_filename_component(pyfile ${pyfile} ABSOLUTE)
- list(APPEND pysrcfiles ${pyfile})
-
- #determine if this file is in the source or binary directory
- file(RELATIVE_PATH source_rel_path ${CMAKE_CURRENT_SOURCE_DIR} ${pyfile})
- string(LENGTH "${source_rel_path}" source_rel_path_len)
- file(RELATIVE_PATH binary_rel_path ${CMAKE_CURRENT_BINARY_DIR} ${pyfile})
- string(LENGTH "${binary_rel_path}" binary_rel_path_len)
-
- #and set the generated path appropriately
- if(${source_rel_path_len} GREATER ${binary_rel_path_len})
- set(pygenfile ${CMAKE_CURRENT_BINARY_DIR}/${binary_rel_path})
- else()
- set(pygenfile ${CMAKE_CURRENT_BINARY_DIR}/${source_rel_path})
- endif()
- list(APPEND pycfiles ${pygenfile}c)
- list(APPEND pyofiles ${pygenfile}o)
-
- #ensure generation path exists
- get_filename_component(pygen_path ${pygenfile} PATH)
- file(MAKE_DIRECTORY ${pygen_path})
-
- endforeach(pyfile)
-
- #the command to generate the pyc files
- add_custom_command(
- DEPENDS ${pysrcfiles} OUTPUT ${pycfiles}
- COMMAND ${PYTHON_EXECUTABLE} ${CMAKE_BINARY_DIR}/python_compile_helper.py ${pysrcfiles} ${pycfiles}
- )
-
- #the command to generate the pyo files
- add_custom_command(
- DEPENDS ${pysrcfiles} OUTPUT ${pyofiles}
- COMMAND ${PYTHON_EXECUTABLE} -O ${CMAKE_BINARY_DIR}/python_compile_helper.py ${pysrcfiles} ${pyofiles}
- )
-
- #create install rule and add generated files to target list
- set(python_install_gen_targets ${pycfiles} ${pyofiles})
- install(FILES ${python_install_gen_targets}
- DESTINATION ${GR_PYTHON_INSTALL_DESTINATION}
- COMPONENT ${GR_PYTHON_INSTALL_COMPONENT}
- )
-
-
- ####################################################################
- elseif(GR_PYTHON_INSTALL_PROGRAMS)
- ####################################################################
- file(TO_NATIVE_PATH ${PYTHON_EXECUTABLE} pyexe_native)
-
- foreach(pyfile ${GR_PYTHON_INSTALL_PROGRAMS})
- get_filename_component(pyfile_name ${pyfile} NAME)
- get_filename_component(pyfile ${pyfile} ABSOLUTE)
- string(REPLACE "${CMAKE_SOURCE_DIR}" "${CMAKE_BINARY_DIR}" pyexefile "${pyfile}.exe")
- list(APPEND python_install_gen_targets ${pyexefile})
-
- get_filename_component(pyexefile_path ${pyexefile} PATH)
- file(MAKE_DIRECTORY ${pyexefile_path})
-
- add_custom_command(
- OUTPUT ${pyexefile} DEPENDS ${pyfile}
- COMMAND ${PYTHON_EXECUTABLE} -c
- \"open('${pyexefile}', 'w').write('\#!${pyexe_native}\\n'+open('${pyfile}').read())\"
- COMMENT "Shebangin ${pyfile_name}"
- )
-
- #on windows, python files need an extension to execute
- get_filename_component(pyfile_ext ${pyfile} EXT)
- if(WIN32 AND NOT pyfile_ext)
- set(pyfile_name "${pyfile_name}.py")
- endif()
-
- install(PROGRAMS ${pyexefile} RENAME ${pyfile_name}
- DESTINATION ${GR_PYTHON_INSTALL_DESTINATION}
- COMPONENT ${GR_PYTHON_INSTALL_COMPONENT}
- )
- endforeach(pyfile)
-
- endif()
-
- GR_UNIQUE_TARGET("pygen" ${python_install_gen_targets})
-
-endfunction(GR_PYTHON_INSTALL)
-
-########################################################################
-# Write the python helper script that generates byte code files
-########################################################################
-file(WRITE ${CMAKE_BINARY_DIR}/python_compile_helper.py "
-import sys, py_compile
-files = sys.argv[1:]
-srcs, gens = files[:len(files)/2], files[len(files)/2:]
-for src, gen in zip(srcs, gens):
- py_compile.compile(file=src, cfile=gen, doraise=True)
-")
diff --git a/gr-aistx/cmake/Modules/GrSwig.cmake b/gr-aistx/cmake/Modules/GrSwig.cmake
deleted file mode 100644
index 6ba5ee3..0000000
--- a/gr-aistx/cmake/Modules/GrSwig.cmake
+++ /dev/null
@@ -1,229 +0,0 @@
-# Copyright 2010-2011 Free Software Foundation, Inc.
-#
-# This file is part of GNU Radio
-#
-# GNU Radio is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 3, or (at your option)
-# any later version.
-#
-# GNU Radio is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with GNU Radio; see the file COPYING. If not, write to
-# the Free Software Foundation, Inc., 51 Franklin Street,
-# Boston, MA 02110-1301, USA.
-
-if(DEFINED __INCLUDED_GR_SWIG_CMAKE)
- return()
-endif()
-set(__INCLUDED_GR_SWIG_CMAKE TRUE)
-
-include(GrPython)
-
-########################################################################
-# Builds a swig documentation file to be generated into python docstrings
-# Usage: GR_SWIG_MAKE_DOCS(output_file input_path input_path....)
-#
-# Set the following variable to specify extra dependent targets:
-# - GR_SWIG_DOCS_SOURCE_DEPS
-# - GR_SWIG_DOCS_TARGET_DEPS
-########################################################################
-function(GR_SWIG_MAKE_DOCS output_file)
- find_package(Doxygen)
- if(DOXYGEN_FOUND)
-
- #setup the input files variable list, quote formated
- set(input_files)
- unset(INPUT_PATHS)
- foreach(input_path ${ARGN})
- if (IS_DIRECTORY ${input_path}) #when input path is a directory
- file(GLOB input_path_h_files ${input_path}/*.h)
- else() #otherwise its just a file, no glob
- set(input_path_h_files ${input_path})
- endif()
- list(APPEND input_files ${input_path_h_files})
- set(INPUT_PATHS "${INPUT_PATHS} \"${input_path}\"")
- endforeach(input_path)
-
- #determine the output directory
- get_filename_component(name ${output_file} NAME_WE)
- get_filename_component(OUTPUT_DIRECTORY ${output_file} PATH)
- set(OUTPUT_DIRECTORY ${OUTPUT_DIRECTORY}/${name}_swig_docs)
- make_directory(${OUTPUT_DIRECTORY})
-
- #generate the Doxyfile used by doxygen
- configure_file(
- ${CMAKE_SOURCE_DIR}/docs/doxygen/Doxyfile.swig_doc.in
- ${OUTPUT_DIRECTORY}/Doxyfile
- @ONLY)
-
- #Create a dummy custom command that depends on other targets
- include(GrMiscUtils)
- GR_GEN_TARGET_DEPS(_${name}_tag tag_deps ${GR_SWIG_DOCS_TARGET_DEPS})
-
- #call doxygen on the Doxyfile + input headers
- add_custom_command(
- OUTPUT ${OUTPUT_DIRECTORY}/xml/index.xml
- DEPENDS ${input_files} ${GR_SWIG_DOCS_SOURCE_DEPS} ${tag_deps}
- COMMAND ${DOXYGEN_EXECUTABLE} ${OUTPUT_DIRECTORY}/Doxyfile
- COMMENT "Generating doxygen xml for ${name} docs"
- )
-
- #call the swig_doc script on the xml files
- add_custom_command(
- OUTPUT ${output_file}
- DEPENDS ${input_files} ${OUTPUT_DIRECTORY}/xml/index.xml
- COMMAND ${PYTHON_EXECUTABLE} ${PYTHON_DASH_B}
- ${CMAKE_SOURCE_DIR}/docs/doxygen/swig_doc.py
- ${OUTPUT_DIRECTORY}/xml
- ${output_file}
- WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}/docs/doxygen
- )
-
- else(DOXYGEN_FOUND)
- file(WRITE ${output_file} "\n") #no doxygen -> empty file
- endif(DOXYGEN_FOUND)
-endfunction(GR_SWIG_MAKE_DOCS)
-
-########################################################################
-# Build a swig target for the common gnuradio use case. Usage:
-# GR_SWIG_MAKE(target ifile ifile ifile...)
-#
-# Set the following variables before calling:
-# - GR_SWIG_FLAGS
-# - GR_SWIG_INCLUDE_DIRS
-# - GR_SWIG_LIBRARIES
-# - GR_SWIG_SOURCE_DEPS
-# - GR_SWIG_TARGET_DEPS
-# - GR_SWIG_DOC_FILE
-# - GR_SWIG_DOC_DIRS
-########################################################################
-macro(GR_SWIG_MAKE name)
- set(ifiles ${ARGN})
-
- #do swig doc generation if specified
- if (GR_SWIG_DOC_FILE)
- set(GR_SWIG_DOCS_SOURCE_DEPS ${GR_SWIG_SOURCE_DEPS})
- set(GR_SWIG_DOCS_TAREGT_DEPS ${GR_SWIG_TARGET_DEPS})
- GR_SWIG_MAKE_DOCS(${GR_SWIG_DOC_FILE} ${GR_SWIG_DOC_DIRS})
- list(APPEND GR_SWIG_SOURCE_DEPS ${GR_SWIG_DOC_FILE})
- endif()
-
- #append additional include directories
- find_package(PythonLibs)
- list(APPEND GR_SWIG_INCLUDE_DIRS ${PYTHON_INCLUDE_PATH}) #deprecated name (now dirs)
- list(APPEND GR_SWIG_INCLUDE_DIRS ${PYTHON_INCLUDE_DIRS})
- list(APPEND GR_SWIG_INCLUDE_DIRS ${CMAKE_CURRENT_SOURCE_DIR})
- list(APPEND GR_SWIG_INCLUDE_DIRS ${CMAKE_CURRENT_BINARY_DIR})
-
- #determine include dependencies for swig file
- execute_process(
- COMMAND ${PYTHON_EXECUTABLE}
- ${CMAKE_BINARY_DIR}/get_swig_deps.py
- "${ifiles}" "${GR_SWIG_INCLUDE_DIRS}"
- OUTPUT_STRIP_TRAILING_WHITESPACE
- OUTPUT_VARIABLE SWIG_MODULE_${name}_EXTRA_DEPS
- WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}
- )
-
- #Create a dummy custom command that depends on other targets
- include(GrMiscUtils)
- GR_GEN_TARGET_DEPS(_${name}_swig_tag tag_deps ${GR_SWIG_TARGET_DEPS})
- set(tag_file ${CMAKE_CURRENT_BINARY_DIR}/${name}.tag)
- add_custom_command(
- OUTPUT ${tag_file}
- DEPENDS ${GR_SWIG_SOURCE_DEPS} ${tag_deps}
- COMMAND ${CMAKE_COMMAND} -E touch ${tag_file}
- )
-
- #append the specified include directories
- include_directories(${GR_SWIG_INCLUDE_DIRS})
- list(APPEND SWIG_MODULE_${name}_EXTRA_DEPS ${tag_file})
-
- #setup the swig flags with flags and include directories
- set(CMAKE_SWIG_FLAGS -fvirtual -modern -keyword -w511 -module ${name} ${GR_SWIG_FLAGS})
- foreach(dir ${GR_SWIG_INCLUDE_DIRS})
- list(APPEND CMAKE_SWIG_FLAGS "-I${dir}")
- endforeach(dir)
-
- #set the C++ property on the swig .i file so it builds
- set_source_files_properties(${ifiles} PROPERTIES CPLUSPLUS ON)
-
- #setup the actual swig library target to be built
- include(UseSWIG)
- SWIG_ADD_MODULE(${name} python ${ifiles})
- SWIG_LINK_LIBRARIES(${name} ${PYTHON_LIBRARIES} ${GR_SWIG_LIBRARIES})
-
-endmacro(GR_SWIG_MAKE)
-
-########################################################################
-# Install swig targets generated by GR_SWIG_MAKE. Usage:
-# GR_SWIG_INSTALL(
-# TARGETS target target target...
-# [DESTINATION destination]
-# [COMPONENT component]
-# )
-########################################################################
-macro(GR_SWIG_INSTALL)
-
- include(CMakeParseArgumentsCopy)
- CMAKE_PARSE_ARGUMENTS(GR_SWIG_INSTALL "" "DESTINATION;COMPONENT" "TARGETS" ${ARGN})
-
- foreach(name ${GR_SWIG_INSTALL_TARGETS})
- install(TARGETS ${SWIG_MODULE_${name}_REAL_NAME}
- DESTINATION ${GR_SWIG_INSTALL_DESTINATION}
- COMPONENT ${GR_SWIG_INSTALL_COMPONENT}
- )
-
- include(GrPython)
- GR_PYTHON_INSTALL(FILES ${CMAKE_CURRENT_BINARY_DIR}/${name}.py
- DESTINATION ${GR_SWIG_INSTALL_DESTINATION}
- COMPONENT ${GR_SWIG_INSTALL_COMPONENT}
- )
-
- GR_LIBTOOL(
- TARGET ${SWIG_MODULE_${name}_REAL_NAME}
- DESTINATION ${GR_SWIG_INSTALL_DESTINATION}
- )
-
- endforeach(name)
-
-endmacro(GR_SWIG_INSTALL)
-
-########################################################################
-# Generate a python file that can determine swig dependencies.
-# Used by the make macro above to determine extra dependencies.
-# When you build C++, CMake figures out the header dependencies.
-# This code essentially performs that logic for swig includes.
-########################################################################
-file(WRITE ${CMAKE_BINARY_DIR}/get_swig_deps.py "
-
-import os, sys, re
-
-include_matcher = re.compile('[#|%]include\\s*[<|\"](.*)[>|\"]')
-include_dirs = sys.argv[2].split(';')
-
-def get_swig_incs(file_path):
- file_contents = open(file_path, 'r').read()
- return include_matcher.findall(file_contents, re.MULTILINE)
-
-def get_swig_deps(file_path, level):
- deps = [file_path]
- if level == 0: return deps
- for inc_file in get_swig_incs(file_path):
- for inc_dir in include_dirs:
- inc_path = os.path.join(inc_dir, inc_file)
- if not os.path.exists(inc_path): continue
- deps.extend(get_swig_deps(inc_path, level-1))
- return deps
-
-if __name__ == '__main__':
- ifiles = sys.argv[1].split(';')
- deps = sum([get_swig_deps(ifile, 3) for ifile in ifiles], [])
- #sys.stderr.write(';'.join(set(deps)) + '\\n\\n')
- print(';'.join(set(deps)))
-")
diff --git a/gr-aistx/cmake/Modules/GrTest.cmake b/gr-aistx/cmake/Modules/GrTest.cmake
deleted file mode 100644
index 6174c03..0000000
--- a/gr-aistx/cmake/Modules/GrTest.cmake
+++ /dev/null
@@ -1,133 +0,0 @@
-# Copyright 2010-2011 Free Software Foundation, Inc.
-#
-# This file is part of GNU Radio
-#
-# GNU Radio is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 3, or (at your option)
-# any later version.
-#
-# GNU Radio is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with GNU Radio; see the file COPYING. If not, write to
-# the Free Software Foundation, Inc., 51 Franklin Street,
-# Boston, MA 02110-1301, USA.
-
-if(DEFINED __INCLUDED_GR_TEST_CMAKE)
- return()
-endif()
-set(__INCLUDED_GR_TEST_CMAKE TRUE)
-
-########################################################################
-# Add a unit test and setup the environment for a unit test.
-# Takes the same arguments as the ADD_TEST function.
-#
-# Before calling set the following variables:
-# GR_TEST_TARGET_DEPS - built targets for the library path
-# GR_TEST_LIBRARY_DIRS - directories for the library path
-# GR_TEST_PYTHON_DIRS - directories for the python path
-########################################################################
-function(GR_ADD_TEST test_name)
-
- if(WIN32)
- #Ensure that the build exe also appears in the PATH.
- list(APPEND GR_TEST_TARGET_DEPS ${ARGN})
-
- #In the land of windows, all libraries must be in the PATH.
- #Since the dependent libraries are not yet installed,
- #we must manually set them in the PATH to run tests.
- #The following appends the path of a target dependency.
- foreach(target ${GR_TEST_TARGET_DEPS})
- get_target_property(location ${target} LOCATION)
- if(location)
- get_filename_component(path ${location} PATH)
- string(REGEX REPLACE "\\$\\(.*\\)" ${CMAKE_BUILD_TYPE} path ${path})
- list(APPEND GR_TEST_LIBRARY_DIRS ${path})
- endif(location)
- endforeach(target)
-
- #SWIG generates the python library files into a subdirectory.
- #Therefore, we must append this subdirectory into PYTHONPATH.
- #Only do this for the python directories matching the following:
- foreach(pydir ${GR_TEST_PYTHON_DIRS})
- get_filename_component(name ${pydir} NAME)
- if(name MATCHES "^(swig|lib|src)$")
- list(APPEND GR_TEST_PYTHON_DIRS ${pydir}/${CMAKE_BUILD_TYPE})
- endif()
- endforeach(pydir)
- endif(WIN32)
-
- file(TO_NATIVE_PATH ${CMAKE_CURRENT_SOURCE_DIR} srcdir)
- file(TO_NATIVE_PATH "${GR_TEST_LIBRARY_DIRS}" libpath) #ok to use on dir list?
- file(TO_NATIVE_PATH "${GR_TEST_PYTHON_DIRS}" pypath) #ok to use on dir list?
-
- set(environs "GR_DONT_LOAD_PREFS=1" "srcdir=${srcdir}")
-
- #http://www.cmake.org/pipermail/cmake/2009-May/029464.html
- #Replaced this add test + set environs code with the shell script generation.
- #Its nicer to be able to manually run the shell script to diagnose problems.
- #ADD_TEST(${ARGV})
- #SET_TESTS_PROPERTIES(${test_name} PROPERTIES ENVIRONMENT "${environs}")
-
- if(UNIX)
- set(binpath "${CMAKE_CURRENT_BINARY_DIR}:$PATH")
- #set both LD and DYLD paths to cover multiple UNIX OS library paths
- list(APPEND libpath "$LD_LIBRARY_PATH" "$DYLD_LIBRARY_PATH")
- list(APPEND pypath "$PYTHONPATH")
-
- #replace list separator with the path separator
- string(REPLACE ";" ":" libpath "${libpath}")
- string(REPLACE ";" ":" pypath "${pypath}")
- list(APPEND environs "PATH=${binpath}" "LD_LIBRARY_PATH=${libpath}" "DYLD_LIBRARY_PATH=${libpath}" "PYTHONPATH=${pypath}")
-
- #generate a bat file that sets the environment and runs the test
- find_program(SHELL sh)
- set(sh_file ${CMAKE_CURRENT_BINARY_DIR}/${test_name}_test.sh)
- file(WRITE ${sh_file} "#!${SHELL}\n")
- #each line sets an environment variable
- foreach(environ ${environs})
- file(APPEND ${sh_file} "export ${environ}\n")
- endforeach(environ)
- #load the command to run with its arguments
- foreach(arg ${ARGN})
- file(APPEND ${sh_file} "${arg} ")
- endforeach(arg)
- file(APPEND ${sh_file} "\n")
-
- #make the shell file executable
- execute_process(COMMAND chmod +x ${sh_file})
-
- add_test(${test_name} ${SHELL} ${sh_file})
-
- endif(UNIX)
-
- if(WIN32)
- list(APPEND libpath ${DLL_PATHS} "%PATH%")
- list(APPEND pypath "%PYTHONPATH%")
-
- #replace list separator with the path separator (escaped)
- string(REPLACE ";" "\\;" libpath "${libpath}")
- string(REPLACE ";" "\\;" pypath "${pypath}")
- list(APPEND environs "PATH=${libpath}" "PYTHONPATH=${pypath}")
-
- #generate a bat file that sets the environment and runs the test
- set(bat_file ${CMAKE_CURRENT_BINARY_DIR}/${test_name}_test.bat)
- file(WRITE ${bat_file} "@echo off\n")
- #each line sets an environment variable
- foreach(environ ${environs})
- file(APPEND ${bat_file} "SET ${environ}\n")
- endforeach(environ)
- #load the command to run with its arguments
- foreach(arg ${ARGN})
- file(APPEND ${bat_file} "${arg} ")
- endforeach(arg)
- file(APPEND ${bat_file} "\n")
-
- add_test(${test_name} ${bat_file})
- endif(WIN32)
-
-endfunction(GR_ADD_TEST)
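GR_ADD_TEST, also removed here, wrapped ADD_TEST in a generated shell (or batch) script that exports the library and Python paths listed above. A hedged sketch of how a module's python/CMakeLists.txt typically invoked it (the QA script name is illustrative):

    # python/CMakeLists.txt -- sketch only
    include(GrTest)
    set(GR_TEST_TARGET_DEPS gnuradio-aistx)                 # built targets added to the library path
    set(GR_TEST_PYTHON_DIRS ${CMAKE_BINARY_DIR}/swig)       # where the SWIG-generated module lands
    GR_ADD_TEST(qa_aistx ${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/qa_aistx.py)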
diff --git a/gr-aistx/cmake/Modules/gnuradio-aistxConfig.cmake b/gr-aistx/cmake/Modules/gnuradio-aistxConfig.cmake
new file mode 100644
index 0000000..9768205
--- /dev/null
+++ b/gr-aistx/cmake/Modules/gnuradio-aistxConfig.cmake
@@ -0,0 +1,32 @@
+find_package(PkgConfig)
+
+PKG_CHECK_MODULES(PC_GR_AISTX gnuradio-aistx)
+
+FIND_PATH(
+ GR_AISTX_INCLUDE_DIRS
+ NAMES gnuradio/aistx/api.h
+ HINTS $ENV{AISTX_DIR}/include
+ ${PC_AISTX_INCLUDEDIR}
+ PATHS ${CMAKE_INSTALL_PREFIX}/include
+ /usr/local/include
+ /usr/include
+)
+
+FIND_LIBRARY(
+ GR_AISTX_LIBRARIES
+ NAMES gnuradio-aistx
+ HINTS $ENV{AISTX_DIR}/lib
+ ${PC_AISTX_LIBDIR}
+ PATHS ${CMAKE_INSTALL_PREFIX}/lib
+ ${CMAKE_INSTALL_PREFIX}/lib64
+ /usr/local/lib
+ /usr/local/lib64
+ /usr/lib
+ /usr/lib64
+ )
+
+include("${CMAKE_CURRENT_LIST_DIR}/gnuradio-aistxTarget.cmake")
+
+INCLUDE(FindPackageHandleStandardArgs)
+FIND_PACKAGE_HANDLE_STANDARD_ARGS(GR_AISTX DEFAULT_MSG GR_AISTX_LIBRARIES GR_AISTX_INCLUDE_DIRS)
+MARK_AS_ADVANCED(GR_AISTX_LIBRARIES GR_AISTX_INCLUDE_DIRS)
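The new gnuradio-aistxConfig.cmake makes the installed library discoverable with find_package(). A minimal downstream consumer might look like the following sketch (project and target names are hypothetical; it assumes the module is installed under a prefix CMake already searches):

    cmake_minimum_required(VERSION 3.8)
    project(ais_consumer CXX)
    find_package(gnuradio-aistx REQUIRED)                     # runs the config module above
    add_executable(ais_demo main.cc)                          # hypothetical target
    target_include_directories(ais_demo PRIVATE ${GR_AISTX_INCLUDE_DIRS})
    target_link_libraries(ais_demo ${GR_AISTX_LIBRARIES})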
diff --git a/gr-aistx/cmake/Modules/targetConfig.cmake.in b/gr-aistx/cmake/Modules/targetConfig.cmake.in
new file mode 100644
index 0000000..4a1fb31
--- /dev/null
+++ b/gr-aistx/cmake/Modules/targetConfig.cmake.in
@@ -0,0 +1,14 @@
+# Copyright 2018 Free Software Foundation, Inc.
+#
+# This file is part of GNU Radio
+#
+# SPDX-License-Identifier: GPL-3.0-or-later
+#
+
+include(CMakeFindDependencyMacro)
+
+set(target_deps "@TARGET_DEPENDENCIES@")
+foreach(dep IN LISTS target_deps)
+ find_dependency(${dep})
+endforeach()
+include("${CMAKE_CURRENT_LIST_DIR}/@TARGET@Targets.cmake")
diff --git a/gr-aistx/docs/CMakeLists.txt b/gr-aistx/docs/CMakeLists.txt
index f16fbf6..837a6ba 100644
--- a/gr-aistx/docs/CMakeLists.txt
+++ b/gr-aistx/docs/CMakeLists.txt
@@ -1,21 +1,10 @@
# Copyright 2011 Free Software Foundation, Inc.
#
-# This file is part of GNU Radio
+# This file was generated by gr_modtool, a tool from the GNU Radio framework
+# This file is a part of gr-aistx
#
-# GNU Radio is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 3, or (at your option)
-# any later version.
+# SPDX-License-Identifier: GPL-3.0-or-later
#
-# GNU Radio is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with GNU Radio; see the file COPYING. If not, write to
-# the Free Software Foundation, Inc., 51 Franklin Street,
-# Boston, MA 02110-1301, USA.
########################################################################
# Setup dependencies
diff --git a/gr-aistx/docs/README.AISTX b/gr-aistx/docs/README.AISTX
deleted file mode 100644
index 90c9c74..0000000
--- a/gr-aistx/docs/README.AISTX
+++ /dev/null
@@ -1,11 +0,0 @@
-This is the AISTX-write-a-block package meant as a guide to building
-out-of-tree packages. To use the AISTX blocks, the Python namespaces
-is in 'AISTX', which is imported as:
-
- import AISTX
-
-See the Doxygen documentation for details about the blocks available
-in this package. A quick listing of the details can be found in Python
-after importing by using:
-
- help(AISTX)
diff --git a/gr-aistx/docs/README.aistx b/gr-aistx/docs/README.aistx
new file mode 100644
index 0000000..3f2e233
--- /dev/null
+++ b/gr-aistx/docs/README.aistx
@@ -0,0 +1,11 @@
+This is the aistx-write-a-block package meant as a guide to building
+out-of-tree packages. To use the aistx blocks, the Python namespace
+is 'aistx', which is imported as:
+
+ import aistx
+
+See the Doxygen documentation for details about the blocks available
+in this package. A quick listing of the details can be found in Python
+after importing by using:
+
+ help(aistx)
diff --git a/gr-aistx/docs/doxygen/CMakeLists.txt b/gr-aistx/docs/doxygen/CMakeLists.txt
index 1b44799..5408046 100644
--- a/gr-aistx/docs/doxygen/CMakeLists.txt
+++ b/gr-aistx/docs/doxygen/CMakeLists.txt
@@ -1,33 +1,23 @@
# Copyright 2011 Free Software Foundation, Inc.
#
-# This file is part of GNU Radio
+# This file was generated by gr_modtool, a tool from the GNU Radio framework
+# This file is a part of gr-aistx
#
-# GNU Radio is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 3, or (at your option)
-# any later version.
+# SPDX-License-Identifier: GPL-3.0-or-later
#
-# GNU Radio is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with GNU Radio; see the file COPYING. If not, write to
-# the Free Software Foundation, Inc., 51 Franklin Street,
-# Boston, MA 02110-1301, USA.
########################################################################
# Create the doxygen configuration file
########################################################################
-file(TO_NATIVE_PATH ${CMAKE_SOURCE_DIR} top_srcdir)
-file(TO_NATIVE_PATH ${CMAKE_BINARY_DIR} top_builddir)
-file(TO_NATIVE_PATH ${CMAKE_SOURCE_DIR} abs_top_srcdir)
-file(TO_NATIVE_PATH ${CMAKE_BINARY_DIR} abs_top_builddir)
+file(TO_NATIVE_PATH ${PROJECT_SOURCE_DIR} top_srcdir)
+file(TO_NATIVE_PATH ${PROJECT_BINARY_DIR} top_builddir)
+file(TO_NATIVE_PATH ${PROJECT_SOURCE_DIR} abs_top_srcdir)
+file(TO_NATIVE_PATH ${PROJECT_BINARY_DIR} abs_top_builddir)
set(HAVE_DOT ${DOXYGEN_DOT_FOUND})
set(enable_html_docs YES)
set(enable_latex_docs NO)
+set(enable_mathjax NO)
set(enable_xml_docs YES)
configure_file(
diff --git a/gr-aistx/docs/doxygen/Doxyfile.in b/gr-aistx/docs/doxygen/Doxyfile.in
index 0820d91..d3aef37 100644
--- a/gr-aistx/docs/doxygen/Doxyfile.in
+++ b/gr-aistx/docs/doxygen/Doxyfile.in
@@ -1,14 +1,16 @@
-# Doxyfile 1.5.7.1
+# Doxyfile 1.8.4
# This file describes the settings to be used by the documentation system
-# doxygen (www.doxygen.org) for a project
+# doxygen (www.doxygen.org) for a project.
#
-# All text after a hash (#) is considered a comment and will be ignored
+# All text after a double hash (##) is considered a comment and is placed
+# in front of the TAG it is preceding.
+# All text after a hash (#) is considered a comment and will be ignored.
# The format is:
# TAG = value [value, ...]
# For lists items can also be appended using:
# TAG += value [value, ...]
-# Values that contain spaces should be placed between quotes (" ")
+# Values that contain spaces should be placed between quotes (" ").
#---------------------------------------------------------------------------
# Project related configuration options
@@ -22,8 +24,9 @@
DOXYFILE_ENCODING = UTF-8
-# The PROJECT_NAME tag is a single word (or a sequence of words surrounded
-# by quotes) that should identify the project.
+# The PROJECT_NAME tag is a single word (or sequence of words) that should
+# identify the project. Note that if you do not use Doxywizard you need
+# to put quotes around the project name if it contains spaces.
PROJECT_NAME = "GNU Radio's AISTX Package"
@@ -33,6 +36,19 @@ PROJECT_NAME = "GNU Radio's AISTX Package"
PROJECT_NUMBER =
+# Using the PROJECT_BRIEF tag one can provide an optional one line description
+# for a project that appears at the top of each page and should give viewer
+# a quick idea about the purpose of the project. Keep the description short.
+
+PROJECT_BRIEF =
+
+# With the PROJECT_LOGO tag one can specify an logo or icon that is
+# included in the documentation. The maximum height of the logo should not
+# exceed 55 pixels and the maximum width should not exceed 200 pixels.
+# Doxygen will copy the logo to the output directory.
+
+PROJECT_LOGO =
+
# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute)
# base path where the generated documentation will be put.
# If a relative path is entered, it will be relative to the location
@@ -54,11 +70,11 @@ CREATE_SUBDIRS = NO
# information to generate all constant output in the proper language.
# The default language is English, other supported languages are:
# Afrikaans, Arabic, Brazilian, Catalan, Chinese, Chinese-Traditional,
-# Croatian, Czech, Danish, Dutch, Farsi, Finnish, French, German, Greek,
-# Hungarian, Italian, Japanese, Japanese-en (Japanese with English messages),
-# Korean, Korean-en, Lithuanian, Norwegian, Macedonian, Persian, Polish,
-# Portuguese, Romanian, Russian, Serbian, Serbian-Cyrilic, Slovak, Slovene,
-# Spanish, Swedish, and Ukrainian.
+# Croatian, Czech, Danish, Dutch, Esperanto, Farsi, Finnish, French, German,
+# Greek, Hungarian, Italian, Japanese, Japanese-en (Japanese with English
+# messages), Korean, Korean-en, Latvian, Lithuanian, Norwegian, Macedonian,
+# Persian, Polish, Portuguese, Romanian, Russian, Serbian, Serbian-Cyrillic,
+# Slovak, Slovene, Spanish, Swedish, Ukrainian, and Vietnamese.
OUTPUT_LANGUAGE = English
@@ -112,7 +128,9 @@ FULL_PATH_NAMES = NO
# only done if one of the specified strings matches the left-hand part of
# the path. The tag can be used to show relative paths in the file list.
# If left blank the directory from which doxygen is run is used as the
-# path to strip.
+# path to strip. Note that you specify absolute paths here, but also
+# relative paths, which will be relative from the directory where doxygen is
+# started.
STRIP_FROM_PATH =
@@ -126,7 +144,7 @@ STRIP_FROM_PATH =
STRIP_FROM_INC_PATH =
# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter
-# (but less readable) file names. This can be useful is your file systems
+# (but less readable) file names. This can be useful if your file system
# doesn't support long names like on DOS, Mac, or CD-ROM.
SHORT_NAMES = NO
@@ -207,11 +225,40 @@ OPTIMIZE_FOR_FORTRAN = NO
OPTIMIZE_OUTPUT_VHDL = NO
+# Doxygen selects the parser to use depending on the extension of the files it
+# parses. With this tag you can assign which parser to use for a given
+# extension. Doxygen has a built-in mapping, but you can override or extend it
+# using this tag. The format is ext=language, where ext is a file extension,
+# and language is one of the parsers supported by doxygen: IDL, Java,
+# Javascript, CSharp, C, C++, D, PHP, Objective-C, Python, Fortran, VHDL, C,
+# C++. For instance to make doxygen treat .inc files as Fortran files (default
+# is PHP), and .f files as C (default is Fortran), use: inc=Fortran f=C. Note
+# that for custom extensions you also need to set FILE_PATTERNS otherwise the
+# files are not read by doxygen.
+
+EXTENSION_MAPPING =
+
+# If MARKDOWN_SUPPORT is enabled (the default) then doxygen pre-processes all
+# comments according to the Markdown format, which allows for more readable
+# documentation. See http://daringfireball.net/projects/markdown/ for details.
+# The output of markdown processing is further processed by doxygen, so you
+# can mix doxygen, HTML, and XML commands with Markdown formatting.
+# Disable only in case of backward compatibilities issues.
+
+MARKDOWN_SUPPORT = YES
+
+# When enabled doxygen tries to link words that correspond to documented
+# classes, or namespaces to their corresponding documentation. Such a link can
+# be prevented in individual cases by putting a % sign in front of the word
+# or globally by setting AUTOLINK_SUPPORT to NO.
+
+AUTOLINK_SUPPORT = YES
+
# If you use STL classes (i.e. std::string, std::vector, etc.) but do not want
# to include (a tag file for) the STL sources as input, then you should
# set this tag to YES in order to let doxygen match functions declarations and
# definitions whose arguments contain STL classes (e.g. func(std::string); v.s.
-# func(std::string) {}). This also make the inheritance and collaboration
+# func(std::string) {}). This also makes the inheritance and collaboration
# diagrams that involve STL classes more complete and accurate.
BUILTIN_STL_SUPPORT = YES
@@ -227,10 +274,10 @@ CPP_CLI_SUPPORT = NO
SIP_SUPPORT = NO
-# For Microsoft's IDL there are propget and propput attributes to indicate getter
-# and setter methods for a property. Setting this option to YES (the default)
-# will make doxygen to replace the get and set methods by a property in the
-# documentation. This will only work if the methods are indeed getting or
+# For Microsoft's IDL there are propget and propput attributes to indicate
+# getter and setter methods for a property. Setting this option to YES (the
+# default) will make doxygen replace the get and set methods by a property in
+# the documentation. This will only work if the methods are indeed getting or
# setting a simple type. If this is not the case, or you want to show the
# methods anyway, you should set this option to NO.
@@ -251,6 +298,22 @@ DISTRIBUTE_GROUP_DOC = NO
SUBGROUPING = YES
+# When the INLINE_GROUPED_CLASSES tag is set to YES, classes, structs and
+# unions are shown inside the group in which they are included (e.g. using
+# @ingroup) instead of on a separate page (for HTML and Man pages) or
+# section (for LaTeX and RTF).
+
+INLINE_GROUPED_CLASSES = NO
+
+# When the INLINE_SIMPLE_STRUCTS tag is set to YES, structs, classes, and
+# unions with only public data fields or simple typedef fields will be shown
+# inline in the documentation of the scope in which they are defined (i.e. file,
+# namespace, or group documentation), provided this scope is documented. If set
+# to NO (the default), structs, classes, and unions are shown on a separate
+# page (for HTML and Man pages) or section (for LaTeX and RTF).
+
+INLINE_SIMPLE_STRUCTS = NO
+
# When TYPEDEF_HIDES_STRUCT is enabled, a typedef of a struct, union, or enum
# is documented as struct, union, or enum with the name of the typedef. So
# typedef struct TypeS {} TypeT, will appear in the documentation as a struct
@@ -261,21 +324,16 @@ SUBGROUPING = YES
TYPEDEF_HIDES_STRUCT = NO
-# The SYMBOL_CACHE_SIZE determines the size of the internal cache use to
-# determine which symbols to keep in memory and which to flush to disk.
-# When the cache is full, less often used symbols will be written to disk.
-# For small to medium size projects (<1000 input files) the default value is
-# probably good enough. For larger projects a too small cache size can cause
-# doxygen to be busy swapping symbols to and from disk most of the time
-# causing a significant performance penality.
-# If the system has enough physical memory increasing the cache will improve the
-# performance by keeping more symbols in memory. Note that the value works on
-# a logarithmic scale so increasing the size by one will rougly double the
-# memory usage. The cache size is given by this formula:
-# 2^(16+SYMBOL_CACHE_SIZE). The valid range is 0..9, the default is 0,
-# corresponding to a cache size of 2^16 = 65536 symbols
-
-SYMBOL_CACHE_SIZE = 4
+# The size of the symbol lookup cache can be set using LOOKUP_CACHE_SIZE. This
+# cache is used to resolve symbols given their name and scope. Since this can
+# be an expensive process and often the same symbol appear multiple times in
+# the code, doxygen keeps a cache of pre-resolved symbols. If the cache is too
+# small doxygen will become slower. If the cache is too large, memory is wasted.
+# The cache size is given by this formula: 2^(16+LOOKUP_CACHE_SIZE). The valid
+# range is 0..9, the default is 0, corresponding to a cache size of 2^16 = 65536
+# symbols.
+
+LOOKUP_CACHE_SIZE = 0
#---------------------------------------------------------------------------
# Build related configuration options
@@ -284,7 +342,7 @@ SYMBOL_CACHE_SIZE = 4
# If the EXTRACT_ALL tag is set to YES doxygen will assume all entities in
# documentation are documented, even if no documentation was available.
# Private class members and static file members will be hidden unless
-# the EXTRACT_PRIVATE and EXTRACT_STATIC tags are set to YES
+# the EXTRACT_PRIVATE respectively EXTRACT_STATIC tags are set to YES
EXTRACT_ALL = YES
@@ -293,6 +351,11 @@ EXTRACT_ALL = YES
EXTRACT_PRIVATE = NO
+# If the EXTRACT_PACKAGE tag is set to YES all members with package or internal
+# scope will be included in the documentation.
+
+EXTRACT_PACKAGE = NO
+
# If the EXTRACT_STATIC tag is set to YES all static members of a file
# will be included in the documentation.
@@ -315,7 +378,7 @@ EXTRACT_LOCAL_METHODS = NO
# extracted and appear in the documentation as a namespace called
# 'anonymous_namespace{file}', where file will be replaced with the base
# name of the file that contains the anonymous namespace. By default
-# anonymous namespace are hidden.
+# anonymous namespaces are hidden.
EXTRACT_ANON_NSPACES = NO
@@ -375,6 +438,12 @@ HIDE_SCOPE_NAMES = NO
SHOW_INCLUDE_FILES = YES
+# If the FORCE_LOCAL_INCLUDES tag is set to YES then Doxygen
+# will list include files with double quotes in the documentation
+# rather than with sharp brackets.
+
+FORCE_LOCAL_INCLUDES = NO
+
# If the INLINE_INFO tag is set to YES (the default) then a tag [inline]
# is inserted in the documentation for inline members.
@@ -394,6 +463,16 @@ SORT_MEMBER_DOCS = YES
SORT_BRIEF_DOCS = NO
+# If the SORT_MEMBERS_CTORS_1ST tag is set to YES then doxygen
+# will sort the (brief and detailed) documentation of class members so that
+# constructors and destructors are listed first. If set to NO (the default)
+# the constructors will appear in the respective orders defined by
+# SORT_MEMBER_DOCS and SORT_BRIEF_DOCS.
+# This tag will be ignored for brief docs if SORT_BRIEF_DOCS is set to NO
+# and ignored for detailed docs if SORT_MEMBER_DOCS is set to NO.
+
+SORT_MEMBERS_CTORS_1ST = NO
+
# If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the
# hierarchy of group names into alphabetical order. If set to NO (the default)
# the group names will appear in their defined order.
@@ -410,6 +489,15 @@ SORT_GROUP_NAMES = NO
SORT_BY_SCOPE_NAME = NO
+# If the STRICT_PROTO_MATCHING option is enabled and doxygen fails to
+# do proper type resolution of all parameters of a function it will reject a
+# match between the prototype and the implementation of a member function even
+# if there is only one candidate or it is obvious which candidate to choose
+# by doing a simple string match. By disabling STRICT_PROTO_MATCHING doxygen
+# will still accept a match between prototype and implementation in such cases.
+
+STRICT_PROTO_MATCHING = NO
+
# The GENERATE_TODOLIST tag can be used to enable (YES) or
# disable (NO) the todo list. This list is created by putting \todo
# commands in the documentation.
@@ -435,15 +523,16 @@ GENERATE_BUGLIST = NO
GENERATE_DEPRECATEDLIST= NO
# The ENABLED_SECTIONS tag can be used to enable conditional
-# documentation sections, marked by \if sectionname ... \endif.
+# documentation sections, marked by \if section-label ... \endif
+# and \cond section-label ... \endcond blocks.
ENABLED_SECTIONS =
# The MAX_INITIALIZER_LINES tag determines the maximum number of lines
-# the initial value of a variable or define consists of for it to appear in
+# the initial value of a variable or macro consists of for it to appear in
# the documentation. If the initializer consists of more lines than specified
# here it will be hidden. Use a value of 0 to hide initializers completely.
-# The appearance of the initializer of individual variables and defines in the
+# The appearance of the initializer of individual variables and macros in the
# documentation can be controlled using \showinitializer or \hideinitializer
# command in the documentation regardless of this setting.
@@ -455,12 +544,6 @@ MAX_INITIALIZER_LINES = 30
SHOW_USED_FILES = YES
-# If the sources in your project are distributed over multiple directories
-# then setting the SHOW_DIRECTORIES tag to YES will show the directory hierarchy
-# in the documentation. The default is NO.
-
-SHOW_DIRECTORIES = NO
-
# Set the SHOW_FILES tag to NO to disable the generation of the Files page.
# This will remove the Files entry from the Quick Index and from the
# Folder Tree View (if specified). The default is YES.
@@ -468,7 +551,8 @@ SHOW_DIRECTORIES = NO
SHOW_FILES = YES
# Set the SHOW_NAMESPACES tag to NO to disable the generation of the
-# Namespaces page. This will remove the Namespaces entry from the Quick Index
+# Namespaces page.
+# This will remove the Namespaces entry from the Quick Index
# and from the Folder Tree View (if specified). The default is YES.
SHOW_NAMESPACES = NO
@@ -483,15 +567,26 @@ SHOW_NAMESPACES = NO
FILE_VERSION_FILTER =
-# The LAYOUT_FILE tag can be used to specify a layout file which will be parsed by
-# doxygen. The layout file controls the global structure of the generated output files
-# in an output format independent way. The create the layout file that represents
-# doxygen's defaults, run doxygen with the -l option. You can optionally specify a
-# file name after the option, if omitted DoxygenLayout.xml will be used as the name
-# of the layout file.
+# The LAYOUT_FILE tag can be used to specify a layout file which will be parsed
+# by doxygen. The layout file controls the global structure of the generated
+# output files in an output format independent way. To create the layout file
+# that represents doxygen's defaults, run doxygen with the -l option.
+# You can optionally specify a file name after the option, if omitted
+# DoxygenLayout.xml will be used as the name of the layout file.
LAYOUT_FILE =
+# The CITE_BIB_FILES tag can be used to specify one or more bib files
+# containing the references data. This must be a list of .bib files. The
+# .bib extension is automatically appended if omitted. Using this command
+# requires the bibtex tool to be installed. See also
+# http://en.wikipedia.org/wiki/BibTeX for more info. For LaTeX the style
+# of the bibliography can be controlled using LATEX_BIB_STYLE. To use this
+# feature you need bibtex and perl available in the search path. Do not use
+# file names with spaces, bibtex cannot handle them.
+
+CITE_BIB_FILES =
+
#---------------------------------------------------------------------------
# configuration options related to warning and progress messages
#---------------------------------------------------------------------------
@@ -520,7 +615,7 @@ WARN_IF_UNDOCUMENTED = YES
WARN_IF_DOC_ERROR = YES
-# This WARN_NO_PARAMDOC option can be abled to get warnings for
+# The WARN_NO_PARAMDOC option can be enabled to get warnings for
# functions that are documented, but have no documentation for their parameters
# or return value. If set to NO (the default) doxygen will only warn about
# wrong or incomplete parameter documentation, but not about the absence of
@@ -552,7 +647,8 @@ WARN_LOGFILE =
# directories like "/usr/src/myproject". Separate the files or directories
# with spaces.
-INPUT = @top_srcdir@ @top_builddir@
+INPUT = "@top_srcdir@" \
+ "@top_builddir@"
# This tag can be used to specify the character encoding of the source files
# that doxygen parses. Internally doxygen uses the UTF-8 encoding, which is
@@ -566,8 +662,9 @@ INPUT_ENCODING = UTF-8
# FILE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp
# and *.h) to filter out the source-files in the directories. If left
# blank the following patterns are tested:
-# *.c *.cc *.cxx *.cpp *.c++ *.java *.ii *.ixx *.ipp *.i++ *.inl *.h *.hh *.hxx
-# *.hpp *.h++ *.idl *.odl *.cs *.php *.php3 *.inc *.m *.mm *.py *.f90
+# *.c *.cc *.cxx *.cpp *.c++ *.d *.java *.ii *.ixx *.ipp *.i++ *.inl *.h *.hh
+# *.hxx *.hpp *.h++ *.idl *.odl *.cs *.php *.php3 *.inc *.m *.mm *.dox *.py
+# *.f90 *.f *.for *.vhd *.vhdl
FILE_PATTERNS = *.h \
*.dox
@@ -578,18 +675,20 @@ FILE_PATTERNS = *.h \
RECURSIVE = YES
-# The EXCLUDE tag can be used to specify files and/or directories that should
+# The EXCLUDE tag can be used to specify files and/or directories that should be
# excluded from the INPUT source files. This way you can easily exclude a
# subdirectory from a directory tree whose root is specified with the INPUT tag.
+# Note that relative paths are relative to the directory from which doxygen is
+# run.
EXCLUDE = @abs_top_builddir@/docs/doxygen/html \
- @abs_top_builddir@/docs/doxygen/xml \
+ @abs_top_builddir@/docs/doxygen/xml \
@abs_top_builddir@/docs/doxygen/other/doxypy.py \
- @abs_top_builddir@/_CPack_Packages \
+ @abs_top_builddir@/_CPack_Packages \
@abs_top_srcdir@/cmake
-# The EXCLUDE_SYMLINKS tag can be used select whether or not files or
-# directories that are symbolic links (a Unix filesystem feature) are excluded
+# The EXCLUDE_SYMLINKS tag can be used to select whether or not files or
+# directories that are symbolic links (a Unix file system feature) are excluded
# from the input.
EXCLUDE_SYMLINKS = NO
@@ -617,8 +716,6 @@ EXCLUDE_PATTERNS = */.deps/* \
EXCLUDE_SYMBOLS = ad9862 \
numpy \
- *swig* \
- *Swig* \
*my_top_block* \
*my_graph* \
*app_top_block* \
@@ -667,19 +764,24 @@ IMAGE_PATH =
# by executing (via popen()) the command , where
# is the value of the INPUT_FILTER tag, and is the name of an
# input file. Doxygen will then use the output that the filter program writes
-# to standard output. If FILTER_PATTERNS is specified, this tag will be
-# ignored.
+# to standard output.
+# If FILTER_PATTERNS is specified, this tag will be ignored.
+# Note that the filter must not add or remove lines; it is applied before the
+# code is scanned, but not when the output code is generated. If lines are added
+# or removed, the anchors will not be placed correctly.
INPUT_FILTER =
# The FILTER_PATTERNS tag can be used to specify filters on a per file pattern
-# basis. Doxygen will compare the file name with each pattern and apply the
-# filter if there is a match. The filters are a list of the form:
+# basis.
+# Doxygen will compare the file name with each pattern and apply the
+# filter if there is a match.
+# The filters are a list of the form:
# pattern=filter (like *.cpp=my_cpp_filter). See INPUT_FILTER for further
-# info on how filters are used. If FILTER_PATTERNS is empty, INPUT_FILTER
-# is applied to all files.
+# info on how filters are used. If FILTER_PATTERNS is empty or if
+# none of the patterns match the file name, INPUT_FILTER is applied.
-FILTER_PATTERNS = *.py=@top_srcdir@/gnuradio-core/doc/other/doxypy.py
+FILTER_PATTERNS = *.py=@top_srcdir@/docs/doxygen/other/doxypy.py
# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using
# INPUT_FILTER) will be used to filter the input files when producing source
@@ -687,6 +789,21 @@ FILTER_PATTERNS = *.py=@top_srcdir@/gnuradio-core/doc/other/doxypy.py
FILTER_SOURCE_FILES = NO
+# The FILTER_SOURCE_PATTERNS tag can be used to specify source filters per file
+# pattern. A pattern will override the setting for FILTER_PATTERN (if any)
+# and it is also possible to disable source filtering for a specific pattern
+# using *.ext= (so without naming a filter). This option only has effect when
+# FILTER_SOURCE_FILES is enabled.
+
+FILTER_SOURCE_PATTERNS =
+
+# If the USE_MD_FILE_AS_MAINPAGE tag refers to the name of a markdown file that
+# is part of the input, its contents will be placed on the main page
+# (index.html). This can be useful if you have a project on for instance GitHub
+# and want to reuse the introduction page also for the doxygen output.
+
+USE_MDFILE_AS_MAINPAGE =
+
#---------------------------------------------------------------------------
# configuration options related to source browsing
#---------------------------------------------------------------------------
@@ -705,7 +822,7 @@ INLINE_SOURCES = NO
# Setting the STRIP_CODE_COMMENTS tag to YES (the default) will instruct
# doxygen to hide any special comment blocks from generated source code
-# fragments. Normal C and C++ comments will always remain visible.
+# fragments. Normal C, C++ and Fortran comments will always remain visible.
STRIP_CODE_COMMENTS = NO
@@ -724,7 +841,8 @@ REFERENCES_RELATION = YES
# If the REFERENCES_LINK_SOURCE tag is set to YES (the default)
# and SOURCE_BROWSER tag is set to YES, then the hyperlinks from
# functions in REFERENCES_RELATION and REFERENCED_BY_RELATION lists will
-# link to the source code. Otherwise they will link to the documentstion.
+# link to the source code.
+# Otherwise they will link to the documentation.
REFERENCES_LINK_SOURCE = YES
@@ -752,12 +870,6 @@ VERBATIM_HEADERS = YES
ALPHABETICAL_INDEX = YES
-# If the alphabetical index is enabled (see ALPHABETICAL_INDEX) then
-# the COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns
-# in which this list will be split (can be a number in the range [1..20])
-
-COLS_IN_ALPHA_INDEX = 5
-
# In case all classes in a project start with a common prefix, all
# classes will be put under the same header in the alphabetical index.
# The IGNORE_PREFIX tag can be used to specify one or more prefixes that
@@ -788,7 +900,14 @@ HTML_FILE_EXTENSION = .html
# The HTML_HEADER tag can be used to specify a personal HTML header for
# each generated HTML page. If it is left blank doxygen will generate a
-# standard header.
+# standard header. Note that when using a custom header you are responsible
+# for the proper inclusion of any scripts and style sheets that doxygen
+# needs, which is dependent on the configuration options used.
+# It is advised to generate a default header using "doxygen -w html
+# header.html footer.html stylesheet.css YourConfigFile" and then modify
+# that header. Note that the header is subject to change so you typically
+# have to redo this when upgrading to a newer version of doxygen or when
+# changing the value of configuration settings such as GENERATE_TREEVIEW!
HTML_HEADER =
@@ -800,27 +919,80 @@ HTML_FOOTER =
# The HTML_STYLESHEET tag can be used to specify a user-defined cascading
# style sheet that is used by each HTML page. It can be used to
-# fine-tune the look of the HTML output. If the tag is left blank doxygen
-# will generate a default style sheet. Note that doxygen will try to copy
-# the style sheet file to the HTML output directory, so don't put your own
-# stylesheet in the HTML output directory as well, or it will be erased!
+# fine-tune the look of the HTML output. If left blank doxygen will
+# generate a default style sheet. Note that it is recommended to use
+# HTML_EXTRA_STYLESHEET instead of this one, as it is more robust and this
+# tag will in the future become obsolete.
HTML_STYLESHEET =
-# If the HTML_ALIGN_MEMBERS tag is set to YES, the members of classes,
-# files or namespaces will be aligned in HTML using tables. If set to
-# NO a bullet list will be used.
+# The HTML_EXTRA_STYLESHEET tag can be used to specify an additional
+# user-defined cascading style sheet that is included after the standard
+# style sheets created by doxygen. Using this option one can overrule
+# certain style aspects. This is preferred over using HTML_STYLESHEET
+# since it does not replace the standard style sheet and is therefore more
+# robust against future updates. Doxygen will copy the style sheet file to
+# the output directory.
-HTML_ALIGN_MEMBERS = YES
+HTML_EXTRA_STYLESHEET =
+
+# The HTML_EXTRA_FILES tag can be used to specify one or more extra images or
+# other source files which should be copied to the HTML output directory. Note
+# that these files will be copied to the base HTML output directory. Use the
+# $relpath^ marker in the HTML_HEADER and/or HTML_FOOTER files to load these
+# files. In the HTML_STYLESHEET file, use the file name only. Also note that
+# the files will be copied as-is; there are no commands or markers available.
+
+HTML_EXTRA_FILES =
+
+# The HTML_COLORSTYLE_HUE tag controls the color of the HTML output.
+# Doxygen will adjust the colors in the style sheet and background images
+# according to this color. Hue is specified as an angle on a colorwheel,
+# see http://en.wikipedia.org/wiki/Hue for more information.
+# For instance the value 0 represents red, 60 is yellow, 120 is green,
+# 180 is cyan, 240 is blue, 300 purple, and 360 is red again.
+# The allowed range is 0 to 359.
+
+HTML_COLORSTYLE_HUE = 220
+
+# The HTML_COLORSTYLE_SAT tag controls the purity (or saturation) of
+# the colors in the HTML output. For a value of 0 the output will use
+# grayscales only. A value of 255 will produce the most vivid colors.
+
+HTML_COLORSTYLE_SAT = 100
+
+# The HTML_COLORSTYLE_GAMMA tag controls the gamma correction applied to
+# the luminance component of the colors in the HTML output. Values below
+# 100 gradually make the output lighter, whereas values above 100 make
+# the output darker. The value divided by 100 is the actual gamma applied,
+# so 80 represents a gamma of 0.8, The value 220 represents a gamma of 2.2,
+# and 100 does not change the gamma.
+
+HTML_COLORSTYLE_GAMMA = 80
+
+# If the HTML_TIMESTAMP tag is set to YES then the footer of each generated HTML
+# page will contain the date and time when the page was generated. Setting
+# this to NO can help when comparing the output of multiple runs.
+
+HTML_TIMESTAMP = NO
# If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML
# documentation will contain sections that can be hidden and shown after the
-# page has loaded. For this to work a browser that supports
-# JavaScript and DHTML is required (for instance Mozilla 1.0+, Firefox
-# Netscape 6.0+, Internet explorer 5.0+, Konqueror, or Safari).
+# page has loaded.
HTML_DYNAMIC_SECTIONS = NO
+# With HTML_INDEX_NUM_ENTRIES one can control the preferred number of
+# entries shown in the various tree structured indices initially; the user
+# can expand and collapse entries dynamically later on. Doxygen will expand
+# the tree to such a level that at most the specified number of entries are
+# visible (unless a fully collapsed tree already exceeds this amount).
+# So setting the number of entries 1 will produce a full collapsed tree by
+# default. 0 is a special value representing an infinite number of entries
+# and will result in a full expanded tree by default.
+
+HTML_INDEX_NUM_ENTRIES = 100
+
# If the GENERATE_DOCSET tag is set to YES, additional index files
# will be generated that can be used as input for Apple's Xcode 3
# integrated development environment, introduced with OSX 10.5 (Leopard).
@@ -829,7 +1001,8 @@ HTML_DYNAMIC_SECTIONS = NO
# directory and running "make install" will install the docset in
# ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find
# it at startup.
-# See http://developer.apple.com/tools/creatingdocsetswithdoxygen.html for more information.
+# See http://developer.apple.com/tools/creatingdocsetswithdoxygen.html
+# for more information.
GENERATE_DOCSET = NO
@@ -847,6 +1020,16 @@ DOCSET_FEEDNAME = "Doxygen generated docs"
DOCSET_BUNDLE_ID = org.doxygen.Project
+# When GENERATE_PUBLISHER_ID tag specifies a string that should uniquely
+# identify the documentation publisher. This should be a reverse domain-name
+# style string, e.g. com.mycompany.MyDocSet.documentation.
+
+DOCSET_PUBLISHER_ID = org.doxygen.Publisher
+
+# The GENERATE_PUBLISHER_NAME tag identifies the documentation publisher.
+
+DOCSET_PUBLISHER_NAME = Publisher
+
# If the GENERATE_HTMLHELP tag is set to YES, additional index files
# will be generated that can be used as input for tools like the
# Microsoft HTML help workshop to generate a compiled HTML help file (.chm)
@@ -891,10 +1074,10 @@ BINARY_TOC = NO
TOC_EXPAND = YES
-# If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and QHP_VIRTUAL_FOLDER
-# are set, an additional index file will be generated that can be used as input for
-# Qt's qhelpgenerator to generate a Qt Compressed Help (.qch) of the generated
-# HTML documentation.
+# If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and
+# QHP_VIRTUAL_FOLDER are set, an additional index file will be generated
+# that can be used as input for Qt's qhelpgenerator to generate a
+# Qt Compressed Help (.qch) of the generated HTML documentation.
GENERATE_QHP = NO
@@ -906,57 +1089,99 @@ QCH_FILE =
# The QHP_NAMESPACE tag specifies the namespace to use when generating
# Qt Help Project output. For more information please see
-# Qt Help Project / Namespace.
+# http://doc.trolltech.com/qthelpproject.html#namespace
QHP_NAMESPACE = org.doxygen.Project
# The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating
# Qt Help Project output. For more information please see
-# Qt Help Project / Virtual Folders.
+# http://doc.trolltech.com/qthelpproject.html#virtual-folders
QHP_VIRTUAL_FOLDER = doc
+# If QHP_CUST_FILTER_NAME is set, it specifies the name of a custom filter to
+# add. For more information please see
+# http://doc.trolltech.com/qthelpproject.html#custom-filters
+
+QHP_CUST_FILTER_NAME =
+
+# The QHP_CUST_FILT_ATTRS tag specifies the list of the attributes of the
+# custom filter to add. For more information please see
+#
+# Qt Help Project / Custom Filters.
+
+QHP_CUST_FILTER_ATTRS =
+
+# The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this
+# project's
+# filter section matches.
+#
+# Qt Help Project / Filter Attributes.
+
+QHP_SECT_FILTER_ATTRS =
+
# If the GENERATE_QHP tag is set to YES, the QHG_LOCATION tag can
# be used to specify the location of Qt's qhelpgenerator.
# If non-empty doxygen will try to run qhelpgenerator on the generated
-# .qhp file .
+# .qhp file.
QHG_LOCATION =
-# The DISABLE_INDEX tag can be used to turn on/off the condensed index at
-# top of each HTML page. The value NO (the default) enables the index and
-# the value YES disables it.
+# If the GENERATE_ECLIPSEHELP tag is set to YES, additional index files
+# will be generated, which together with the HTML files, form an Eclipse help
+# plugin. To install this plugin and make it available under the help contents
+# menu in Eclipse, the contents of the directory containing the HTML and XML
+# files needs to be copied into the plugins directory of eclipse. The name of
+# the directory within the plugins directory should be the same as
+# the ECLIPSE_DOC_ID value. After copying Eclipse needs to be restarted before
+# the help appears.
-DISABLE_INDEX = YES
+GENERATE_ECLIPSEHELP = NO
-# This tag can be used to set the number of enum values (range [1..20])
-# that doxygen will group on one line in the generated HTML documentation.
+# A unique identifier for the eclipse help plugin. When installing the plugin
+# the directory name containing the HTML and XML files should also have
+# this name.
-ENUM_VALUES_PER_LINE = 4
+ECLIPSE_DOC_ID = org.doxygen.Project
+
+# The DISABLE_INDEX tag can be used to turn on/off the condensed index (tabs)
+# at top of each HTML page. The value NO (the default) enables the index and
+# the value YES disables it. Since the tabs have the same information as the
+# navigation tree you can set this option to NO if you already set
+# GENERATE_TREEVIEW to YES.
+
+DISABLE_INDEX = YES
# The GENERATE_TREEVIEW tag is used to specify whether a tree-like index
# structure should be generated to display hierarchical information.
-# If the tag value is set to FRAME, a side panel will be generated
+# If the tag value is set to YES, a side panel will be generated
# containing a tree-like index structure (just like the one that
# is generated for HTML Help). For this to work a browser that supports
-# JavaScript, DHTML, CSS and frames is required (for instance Mozilla 1.0+,
-# Netscape 6.0+, Internet explorer 5.0+, or Konqueror). Windows users are
-# probably better off using the HTML help feature. Other possible values
-# for this tag are: HIERARCHIES, which will generate the Groups, Directories,
-# and Class Hierarchy pages using a tree view instead of an ordered list;
-# ALL, which combines the behavior of FRAME and HIERARCHIES; and NONE, which
-# disables this behavior completely. For backwards compatibility with previous
-# releases of Doxygen, the values YES and NO are equivalent to FRAME and NONE
-# respectively.
+# JavaScript, DHTML, CSS and frames is required (i.e. any modern browser).
+# Windows users are probably better off using the HTML help feature.
+# Since the tree basically has the same information as the tab index you
+# could consider to set DISABLE_INDEX to NO when enabling this option.
GENERATE_TREEVIEW = YES
+# The ENUM_VALUES_PER_LINE tag can be used to set the number of enum values
+# (range [0,1..20]) that doxygen will group on one line in the generated HTML
+# documentation. Note that a value of 0 will completely suppress the enum
+# values from appearing in the overview section.
+
+ENUM_VALUES_PER_LINE = 4
+
# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be
# used to set the initial width (in pixels) of the frame in which the tree
# is shown.
TREEVIEW_WIDTH = 180
+# When the EXT_LINKS_IN_WINDOW option is set to YES doxygen will open
+# links to external symbols imported via tag files in a separate window.
+
+EXT_LINKS_IN_WINDOW = NO
+
# Use this tag to change the font size of Latex formulas included
# as images in the HTML documentation. The default is 10. Note that
# when you change the font size after a successful doxygen run you need
@@ -965,6 +1190,112 @@ TREEVIEW_WIDTH = 180
FORMULA_FONTSIZE = 10
+# Use the FORMULA_TRANSPARENT tag to determine whether or not the images
+# generated for formulas are transparent PNGs. Transparent PNGs are
+# not supported properly for IE 6.0, but are supported on all modern browsers.
+# Note that when changing this option you need to delete any form_*.png files
+# in the HTML output before the changes have effect.
+
+FORMULA_TRANSPARENT = YES
+
+# Enable the USE_MATHJAX option to render LaTeX formulas using MathJax
+# (see http://www.mathjax.org) which uses client side Javascript for the
+# rendering instead of using prerendered bitmaps. Use this if you do not
+# have LaTeX installed or if you want the formulas to look prettier in the HTML
+# output. When enabled you may also need to install MathJax separately and
+# configure the path to it using the MATHJAX_RELPATH option.
+
+USE_MATHJAX = @enable_mathjax@
+
+# When MathJax is enabled you can set the default output format to be used for
+# the MathJax output. Supported types are HTML-CSS, NativeMML (i.e. MathML) and
+# SVG. The default value is HTML-CSS, which is slower, but has the best
+# compatibility.
+
+MATHJAX_FORMAT = SVG
+
+# When MathJax is enabled you need to specify the location relative to the
+# HTML output directory using the MATHJAX_RELPATH option. The destination
+# directory should contain the MathJax.js script. For instance, if the mathjax
+# directory is located at the same level as the HTML output directory, then
+# MATHJAX_RELPATH should be ../mathjax. The default value points to
+# the MathJax Content Delivery Network so you can quickly see the result without
+# installing MathJax.
+# However, it is strongly recommended to install a local
+# copy of MathJax from http://www.mathjax.org before deployment.
+
+MATHJAX_RELPATH = @MATHJAX2_PATH@
+
+# The MATHJAX_EXTENSIONS tag can be used to specify one or MathJax extension
+# names that should be enabled during MathJax rendering.
+
+MATHJAX_EXTENSIONS = TeX/AMSmath TeX/AMSsymbols
+
+# The MATHJAX_CODEFILE tag can be used to specify a file with javascript
+# pieces of code that will be used on startup of the MathJax code.
+
+MATHJAX_CODEFILE =
+
+# When the SEARCHENGINE tag is enabled doxygen will generate a search box
+# for the HTML output. The underlying search engine uses javascript
+# and DHTML and should work on any modern browser. Note that when using
+# HTML help (GENERATE_HTMLHELP), Qt help (GENERATE_QHP), or docsets
+# (GENERATE_DOCSET) there is already a search function so this one should
+# typically be disabled. For large projects the javascript based search engine
+# can be slow, then enabling SERVER_BASED_SEARCH may provide a better solution.
+
+SEARCHENGINE = NO
+
+# When the SERVER_BASED_SEARCH tag is enabled the search engine will be
+# implemented using a web server instead of a web client using Javascript.
+# There are two flavours of web server based search depending on the
+# EXTERNAL_SEARCH setting. When disabled, doxygen will generate a PHP script for
+# searching and an index file used by the script. When EXTERNAL_SEARCH is
+# enabled the indexing and searching needs to be provided by external tools.
+# See the manual for details.
+
+SERVER_BASED_SEARCH = NO
+
+# When EXTERNAL_SEARCH is enabled doxygen will no longer generate the PHP
+# script for searching. Instead the search results are written to an XML file
+# which needs to be processed by an external indexer. Doxygen will invoke an
+# external search engine pointed to by the SEARCHENGINE_URL option to obtain
+# the search results. Doxygen ships with an example indexer (doxyindexer) and
+# search engine (doxysearch.cgi) which are based on the open source search
+# engine library Xapian. See the manual for configuration details.
+
+EXTERNAL_SEARCH = NO
+
+# The SEARCHENGINE_URL should point to a search engine hosted by a web server
+# which will return the search results when EXTERNAL_SEARCH is enabled.
+# Doxygen ships with an example search engine (doxysearch) which is based on
+# the open source search engine library Xapian. See the manual for configuration
+# details.
+
+SEARCHENGINE_URL =
+
+# When SERVER_BASED_SEARCH and EXTERNAL_SEARCH are both enabled the unindexed
+# search data is written to a file for indexing by an external tool. With the
+# SEARCHDATA_FILE tag the name of this file can be specified.
+
+SEARCHDATA_FILE = searchdata.xml
+
+# When SERVER_BASED_SEARCH AND EXTERNAL_SEARCH are both enabled the
+# EXTERNAL_SEARCH_ID tag can be used as an identifier for the project. This is
+# useful in combination with EXTRA_SEARCH_MAPPINGS to search through multiple
+# projects and redirect the results back to the right project.
+
+EXTERNAL_SEARCH_ID =
+
+# The EXTRA_SEARCH_MAPPINGS tag can be used to enable searching through doxygen
+# projects other than the one defined by this configuration file, but that are
+# all added to the same external search index. Each project needs to have a
+# unique id set via EXTERNAL_SEARCH_ID. The search mapping then maps the id
+# of to a relative location where the documentation can be found.
+# The format is: EXTRA_SEARCH_MAPPINGS = id1=loc1 id2=loc2 ...
+
+EXTRA_SEARCH_MAPPINGS =
+
#---------------------------------------------------------------------------
# configuration options related to the LaTeX output
#---------------------------------------------------------------------------
@@ -982,6 +1313,9 @@ LATEX_OUTPUT = latex
# The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be
# invoked. If left blank `latex' will be used as the default command name.
+# Note that when enabling USE_PDFLATEX this option is only used for
+# generating bitmaps for formulas in the HTML output, but not in the
+# Makefile that is written to the output directory.
LATEX_CMD_NAME = latex
@@ -998,8 +1332,8 @@ MAKEINDEX_CMD_NAME = makeindex
COMPACT_LATEX = NO
# The PAPER_TYPE tag can be used to set the paper type that is used
-# by the printer. Possible values are: a4, a4wide, letter, legal and
-# executive. If left blank a4wide will be used.
+# by the printer. Possible values are: a4, letter, legal and
+# executive. If left blank a4 will be used.
PAPER_TYPE = letter
@@ -1015,6 +1349,20 @@ EXTRA_PACKAGES =
LATEX_HEADER =
+# The LATEX_FOOTER tag can be used to specify a personal LaTeX footer for
+# the generated latex document. The footer should contain everything after
+# the last chapter. If it is left blank doxygen will generate a
+# standard footer. Notice: only use this tag if you know what you are doing!
+
+LATEX_FOOTER =
+
+# The LATEX_EXTRA_FILES tag can be used to specify one or more extra images
+# or other source files which should be copied to the LaTeX output directory.
+# Note that the files will be copied as-is; there are no commands or markers
+# available.
+
+LATEX_EXTRA_FILES =
+
# If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated
# is prepared for conversion to pdf (using ps2pdf). The pdf file will
# contain links (just like the HTML output) instead of page references
@@ -1041,6 +1389,19 @@ LATEX_BATCHMODE = NO
LATEX_HIDE_INDICES = NO
+# If LATEX_SOURCE_CODE is set to YES then doxygen will include
+# source code with syntax highlighting in the LaTeX output.
+# Note that which sources are shown also depends on other settings
+# such as SOURCE_BROWSER.
+
+LATEX_SOURCE_CODE = NO
+
+# The LATEX_BIB_STYLE tag can be used to specify the style to use for the
+# bibliography, e.g. plainnat, or ieeetr. The default style is "plain". See
+# http://en.wikipedia.org/wiki/BibTeX for more info.
+
+LATEX_BIB_STYLE = plain
+
#---------------------------------------------------------------------------
# configuration options related to the RTF output
#---------------------------------------------------------------------------
@@ -1072,7 +1433,7 @@ COMPACT_RTF = NO
RTF_HYPERLINKS = NO
-# Load stylesheet definitions from file. Syntax is similar to doxygen's
+# Load style sheet definitions from file. Syntax is similar to doxygen's
# config file, i.e. a series of assignments. You only have to provide
# replacements, missing definitions are set to their default value.
@@ -1127,18 +1488,6 @@ GENERATE_XML = @enable_xml_docs@
XML_OUTPUT = xml
-# The XML_SCHEMA tag can be used to specify an XML schema,
-# which can be used by a validating XML parser to check the
-# syntax of the XML files.
-
-XML_SCHEMA =
-
-# The XML_DTD tag can be used to specify an XML DTD,
-# which can be used by a validating XML parser to check the
-# syntax of the XML files.
-
-XML_DTD =
-
# If the XML_PROGRAMLISTING tag is set to YES Doxygen will
# dump the program listings (including syntax highlighting
# and cross-referencing information) to the XML output. Note that
@@ -1146,6 +1495,21 @@ XML_DTD =
XML_PROGRAMLISTING = NO
+#---------------------------------------------------------------------------
+# configuration options related to the DOCBOOK output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_DOCBOOK tag is set to YES Doxygen will generate DOCBOOK files
+# that can be used to generate PDF.
+
+GENERATE_DOCBOOK = NO
+
+# The DOCBOOK_OUTPUT tag is used to specify where the DOCBOOK pages will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be put in
+# front of it. If left blank docbook will be used as the default path.
+
+DOCBOOK_OUTPUT = docbook
+
#---------------------------------------------------------------------------
# configuration options for the AutoGen Definitions output
#---------------------------------------------------------------------------
@@ -1177,8 +1541,10 @@ GENERATE_PERLMOD = NO
PERLMOD_LATEX = NO
# If the PERLMOD_PRETTY tag is set to YES the Perl module output will be
-# nicely formatted so it can be parsed by a human reader. This is useful
-# if you want to understand what is going on. On the other hand, if this
+# nicely formatted so it can be parsed by a human reader.
+# This is useful
+# if you want to understand what is going on.
+# On the other hand, if this
# tag is set to NO the size of the Perl module output will be much smaller
# and Perl will parse it just the same.
@@ -1215,7 +1581,7 @@ MACRO_EXPANSION = NO
EXPAND_ONLY_PREDEF = NO
# If the SEARCH_INCLUDES tag is set to YES (the default) the includes files
-# in the INCLUDE_PATH (see below) will be search if a #include is found.
+# pointed to by INCLUDE_PATH will be searched when a #include is found.
SEARCH_INCLUDES = YES
@@ -1245,15 +1611,15 @@ PREDEFINED =
# If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then
# this tag can be used to specify a list of macro names that should be expanded.
# The macro definition that is found in the sources will be used.
-# Use the PREDEFINED tag if you want to use a different macro definition.
+# Use the PREDEFINED tag if you want to use a different macro definition that
+# overrules the definition found in the source code.
EXPAND_AS_DEFINED =
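+
+# Illustrative example only (the macro name is hypothetical): to expand just a
+# project's export macro, MACRO_EXPANSION and EXPAND_ONLY_PREDEF would be set
+# to YES and the macro listed here, e.g.
+# EXPAND_AS_DEFINED = AISTX_API
+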
# If the SKIP_FUNCTION_MACROS tag is set to YES (the default) then
-# doxygen's preprocessor will remove all function-like macros that are alone
-# on a line, have an all uppercase name, and do not end with a semicolon. Such
-# function macros are typically used for boiler-plate code, and will confuse
-# the parser if not removed.
+# doxygen's preprocessor will remove all references to function-like macros
+# that are alone on a line, have an all uppercase name, and do not end with a
+# semicolon, because these will confuse the parser if not removed.
SKIP_FUNCTION_MACROS = YES
@@ -1261,20 +1627,18 @@ SKIP_FUNCTION_MACROS = YES
# Configuration::additions related to external references
#---------------------------------------------------------------------------
-# The TAGFILES option can be used to specify one or more tagfiles.
-# Optionally an initial location of the external documentation
-# can be added for each tagfile. The format of a tag file without
-# this location is as follows:
-# TAGFILES = file1 file2 ...
+# The TAGFILES option can be used to specify one or more tagfiles. For each
+# tag file the location of the external documentation should be added. The
+# format of a tag file without this location is as follows:
+#
+# TAGFILES = file1 file2 ...
# Adding location for the tag files is done as follows:
-# TAGFILES = file1=loc1 "file2 = loc2" ...
-# where "loc1" and "loc2" can be relative or absolute paths or
-# URLs. If a location is present for each tag, the installdox tool
-# does not have to be run to correct the links.
-# Note that each tag file must have a unique name
-# (where the name does NOT include the path)
-# If a tag file is not located in the directory in which doxygen
-# is run, you must also specify the path to the tagfile here.
+#
+# TAGFILES = file1=loc1 "file2 = loc2" ...
+# where "loc1" and "loc2" can be relative or absolute paths
+# or URLs. Note that each tag file must have a unique name (where the name does
+# NOT include the path). If a tag file is not located in the directory in which
+# doxygen is run, you must also specify the path to the tagfile here.
TAGFILES =
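+
+# Illustrative example only (the tag file name and URL are hypothetical): a
+# cross-reference to an externally hosted manual could be added with
+# TAGFILES = gnuradio.tag=http://gnuradio.org/doc/doxygen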
@@ -1295,10 +1659,11 @@ ALLEXTERNALS = NO
EXTERNAL_GROUPS = YES
-# The PERL_PATH should be the absolute path and name of the perl script
-# interpreter (i.e. the result of `which perl').
+# If the EXTERNAL_PAGES tag is set to YES all external pages will be listed
+# in the related pages index. If set to NO, only the current project's
+# pages will be listed.
-PERL_PATH = /usr/bin/perl
+EXTERNAL_PAGES = YES
#---------------------------------------------------------------------------
# Configuration options related to the dot tool
@@ -1307,21 +1672,11 @@ PERL_PATH = /usr/bin/perl
# If the CLASS_DIAGRAMS tag is set to YES (the default) Doxygen will
# generate an inheritance diagram (in HTML, RTF and LaTeX) for classes with base
# or super classes. Setting the tag to NO turns the diagrams off. Note that
-# this option is superseded by the HAVE_DOT option below. This is only a
-# fallback. It is recommended to install and use dot, since it yields more
-# powerful graphs.
+# this option also works with HAVE_DOT disabled, but it is recommended to
+# install and use dot, since it yields more powerful graphs.
CLASS_DIAGRAMS = YES
-# You can define message sequence charts within doxygen comments using the \msc
-# command. Doxygen will then run the mscgen tool (see
-# http://www.mcternan.me.uk/mscgen/) to produce the chart and insert it in the
-# documentation. The MSCGEN_PATH tag allows you to specify the directory where
-# the mscgen tool resides. If left empty the tool is assumed to be found in the
-# default search path.
-
-MSCGEN_PATH =
-
# If set to YES, the inheritance and collaboration graphs will hide
# inheritance and usage relations if the target is undocumented
# or is not a class.
@@ -1335,33 +1690,38 @@ HIDE_UNDOC_RELATIONS = YES
HAVE_DOT = @HAVE_DOT@
-# By default doxygen will write a font called FreeSans.ttf to the output
-# directory and reference it in all dot files that doxygen generates. This
-# font does not include all possible unicode characters however, so when you need
-# these (or just want a differently looking font) you can specify the font name
-# using DOT_FONTNAME. You need need to make sure dot is able to find the font,
-# which can be done by putting it in a standard location or by setting the
-# DOTFONTPATH environment variable or by setting DOT_FONTPATH to the directory
-# containing the font.
+# The DOT_NUM_THREADS specifies the number of dot invocations doxygen is
+# allowed to run in parallel. When set to 0 (the default) doxygen will
+# base this on the number of processors available in the system. You can set it
+# explicitly to a value larger than 0 to get control over the balance
+# between CPU load and processing speed.
+
+DOT_NUM_THREADS = 0
+
+# By default doxygen will use the Helvetica font for all dot files that
+# doxygen generates. When you want a different-looking font you can specify
+# the font name using DOT_FONTNAME. You need to make sure dot is able to find
+# the font, which can be done by putting it in a standard location or by setting
+# the DOTFONTPATH environment variable or by setting DOT_FONTPATH to the
+# directory containing the font.
-DOT_FONTNAME = FreeSans
+DOT_FONTNAME = Helvetica
# The DOT_FONTSIZE tag can be used to set the size of the font of dot graphs.
# The default size is 10pt.
DOT_FONTSIZE = 10
-# By default doxygen will tell dot to use the output directory to look for the
-# FreeSans.ttf font (which doxygen will put there itself). If you specify a
-# different font using DOT_FONTNAME you can set the path where dot
-# can find it using this tag.
+# By default doxygen will tell dot to use the Helvetica font.
+# If you specify a different font using DOT_FONTNAME you can use DOT_FONTPATH to
+# set the path where dot can find it.
DOT_FONTPATH =
# If the CLASS_GRAPH and HAVE_DOT tags are set to YES then doxygen
# will generate a graph for each documented class showing the direct and
# indirect inheritance relations. Setting this tag to YES will force the
-# the CLASS_DIAGRAMS tag to NO.
+# CLASS_DIAGRAMS tag to NO.
CLASS_GRAPH = YES
@@ -1383,6 +1743,15 @@ GROUP_GRAPHS = YES
UML_LOOK = NO
+# If the UML_LOOK tag is enabled, the fields and methods are shown inside
+# the class node. If there are many fields or methods and many nodes the
+# graph may become too big to be useful. The UML_LIMIT_NUM_FIELDS
+# threshold limits the number of items for each type to make the size more
+# manageable. Set this to 0 for no limit. Note that the threshold may be
+# exceeded by 50% before the limit is enforced.
+
+UML_LIMIT_NUM_FIELDS = 10
+
# If set to YES, the inheritance and collaboration graphs will show the
# relations between templates and their instances.
@@ -1419,11 +1788,11 @@ CALL_GRAPH = NO
CALLER_GRAPH = NO
# If the GRAPHICAL_HIERARCHY and HAVE_DOT tags are set to YES then doxygen
-# will graphical hierarchy of all classes instead of a textual one.
+# will generate a graphical hierarchy of all classes instead of a textual one.
GRAPHICAL_HIERARCHY = YES
-# If the DIRECTORY_GRAPH, SHOW_DIRECTORIES and HAVE_DOT tags are set to YES
+# If the DIRECTORY_GRAPH and HAVE_DOT tags are set to YES
# then doxygen will show the dependencies a directory has on other directories
# in a graphical way. The dependency relations are determined by the #include
# relations between the files in the directories.
@@ -1431,10 +1800,21 @@ GRAPHICAL_HIERARCHY = YES
DIRECTORY_GRAPH = YES
# The DOT_IMAGE_FORMAT tag can be used to set the image format of the images
-# generated by dot. Possible values are png, jpg, or gif
-# If left blank png will be used.
+# generated by dot. Possible values are svg, png, jpg, or gif.
+# If left blank png will be used. If you choose svg you need to set
+# HTML_FILE_EXTENSION to xhtml in order to make the SVG files
+# visible in IE 9+ (other browsers do not have this requirement).
+
+DOT_IMAGE_FORMAT = svg
+
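+# Illustrative note only: since svg is selected above, IE 9+ viewers would also
+# need HTML_FILE_EXTENSION set to .xhtml (see the comment above); other
+# browsers need no such change.
+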
+# If DOT_IMAGE_FORMAT is set to svg, then this option can be set to YES to
+# enable generation of interactive SVG images that allow zooming and panning.
+# Note that this requires a modern browser other than Internet Explorer.
+# Tested and working are Firefox, Chrome, Safari, and Opera. For IE 9+ you
+# need to set HTML_FILE_EXTENSION to xhtml in order to make the SVG files
+# visible. Older versions of IE do not have SVG support.
-DOT_IMAGE_FORMAT = png
+INTERACTIVE_SVG = NO
# The tag DOT_PATH can be used to specify the path where the dot tool can be
# found. If left blank, it is assumed the dot tool can be found in the path.
@@ -1447,6 +1827,12 @@ DOT_PATH =
DOTFILE_DIRS =
+# The MSCFILE_DIRS tag can be used to specify one or more directories that
+# contain msc files that are included in the documentation (see the
+# \mscfile command).
+
+MSCFILE_DIRS =
+
# The DOT_GRAPH_MAX_NODES tag can be used to set the maximum number of
# nodes that will be shown in the graph. If the number of nodes in a graph
# becomes larger than this value, doxygen will truncate the graph, which is
@@ -1493,12 +1879,3 @@ GENERATE_LEGEND = YES
# the various graphs.
DOT_CLEANUP = YES
-
-#---------------------------------------------------------------------------
-# Configuration::additions related to the search engine
-#---------------------------------------------------------------------------
-
-# The SEARCHENGINE tag specifies whether or not a search engine should be
-# used. If set to NO the values of all tags below this one will be ignored.
-
-SEARCHENGINE = NO
diff --git a/gr-aistx/docs/doxygen/Doxyfile.swig_doc.in b/gr-aistx/docs/doxygen/Doxyfile.swig_doc.in
deleted file mode 100644
index 50b8aa8..0000000
--- a/gr-aistx/docs/doxygen/Doxyfile.swig_doc.in
+++ /dev/null
@@ -1,1514 +0,0 @@
-# Doxyfile 1.6.1
-
-# This file describes the settings to be used by the documentation system
-# doxygen (www.doxygen.org) for a project
-#
-# All text after a hash (#) is considered a comment and will be ignored
-# The format is:
-# TAG = value [value, ...]
-# For lists items can also be appended using:
-# TAG += value [value, ...]
-# Values that contain spaces should be placed between quotes (" ")
-
-#---------------------------------------------------------------------------
-# Project related configuration options
-#---------------------------------------------------------------------------
-
-# This tag specifies the encoding used for all characters in the config file
-# that follow. The default is UTF-8 which is also the encoding used for all
-# text before the first occurrence of this tag. Doxygen uses libiconv (or the
-# iconv built into libc) for the transcoding. See
-# http://www.gnu.org/software/libiconv for the list of possible encodings.
-
-DOXYFILE_ENCODING = UTF-8
-
-# The PROJECT_NAME tag is a single word (or a sequence of words surrounded
-# by quotes) that should identify the project.
-
-PROJECT_NAME = @CPACK_PACKAGE_NAME@
-
-# The PROJECT_NUMBER tag can be used to enter a project or revision number.
-# This could be handy for archiving the generated documentation or
-# if some version control system is used.
-
-PROJECT_NUMBER = @CPACK_PACKAGE_VERSION@
-
-# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute)
-# base path where the generated documentation will be put.
-# If a relative path is entered, it will be relative to the location
-# where doxygen was started. If left blank the current directory will be used.
-
-OUTPUT_DIRECTORY = @OUTPUT_DIRECTORY@
-
-# If the CREATE_SUBDIRS tag is set to YES, then doxygen will create
-# 4096 sub-directories (in 2 levels) under the output directory of each output
-# format and will distribute the generated files over these directories.
-# Enabling this option can be useful when feeding doxygen a huge amount of
-# source files, where putting all generated files in the same directory would
-# otherwise cause performance problems for the file system.
-
-CREATE_SUBDIRS = NO
-
-# The OUTPUT_LANGUAGE tag is used to specify the language in which all
-# documentation generated by doxygen is written. Doxygen will use this
-# information to generate all constant output in the proper language.
-# The default language is English, other supported languages are:
-# Afrikaans, Arabic, Brazilian, Catalan, Chinese, Chinese-Traditional,
-# Croatian, Czech, Danish, Dutch, Esperanto, Farsi, Finnish, French, German,
-# Greek, Hungarian, Italian, Japanese, Japanese-en (Japanese with English
-# messages), Korean, Korean-en, Lithuanian, Norwegian, Macedonian, Persian,
-# Polish, Portuguese, Romanian, Russian, Serbian, Serbian-Cyrilic, Slovak,
-# Slovene, Spanish, Swedish, Ukrainian, and Vietnamese.
-
-OUTPUT_LANGUAGE = English
-
-# If the BRIEF_MEMBER_DESC tag is set to YES (the default) Doxygen will
-# include brief member descriptions after the members that are listed in
-# the file and class documentation (similar to JavaDoc).
-# Set to NO to disable this.
-
-BRIEF_MEMBER_DESC = YES
-
-# If the REPEAT_BRIEF tag is set to YES (the default) Doxygen will prepend
-# the brief description of a member or function before the detailed description.
-# Note: if both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the
-# brief descriptions will be completely suppressed.
-
-REPEAT_BRIEF = YES
-
-# This tag implements a quasi-intelligent brief description abbreviator
-# that is used to form the text in various listings. Each string
-# in this list, if found as the leading text of the brief description, will be
-# stripped from the text and the result after processing the whole list, is
-# used as the annotated text. Otherwise, the brief description is used as-is.
-# If left blank, the following values are used ("$name" is automatically
-# replaced with the name of the entity): "The $name class" "The $name widget"
-# "The $name file" "is" "provides" "specifies" "contains"
-# "represents" "a" "an" "the"
-
-ABBREVIATE_BRIEF =
-
-# If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then
-# Doxygen will generate a detailed section even if there is only a brief
-# description.
-
-ALWAYS_DETAILED_SEC = NO
-
-# If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all
-# inherited members of a class in the documentation of that class as if those
-# members were ordinary class members. Constructors, destructors and assignment
-# operators of the base classes will not be shown.
-
-INLINE_INHERITED_MEMB = NO
-
-# If the FULL_PATH_NAMES tag is set to YES then Doxygen will prepend the full
-# path before files name in the file list and in the header files. If set
-# to NO the shortest path that makes the file name unique will be used.
-
-FULL_PATH_NAMES = YES
-
-# If the FULL_PATH_NAMES tag is set to YES then the STRIP_FROM_PATH tag
-# can be used to strip a user-defined part of the path. Stripping is
-# only done if one of the specified strings matches the left-hand part of
-# the path. The tag can be used to show relative paths in the file list.
-# If left blank the directory from which doxygen is run is used as the
-# path to strip.
-
-STRIP_FROM_PATH =
-
-# The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of
-# the path mentioned in the documentation of a class, which tells
-# the reader which header file to include in order to use a class.
-# If left blank only the name of the header file containing the class
-# definition is used. Otherwise one should specify the include paths that
-# are normally passed to the compiler using the -I flag.
-
-STRIP_FROM_INC_PATH =
-
-# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter
-# (but less readable) file names. This can be useful is your file systems
-# doesn't support long names like on DOS, Mac, or CD-ROM.
-
-SHORT_NAMES = NO
-
-# If the JAVADOC_AUTOBRIEF tag is set to YES then Doxygen
-# will interpret the first line (until the first dot) of a JavaDoc-style
-# comment as the brief description. If set to NO, the JavaDoc
-# comments will behave just like regular Qt-style comments
-# (thus requiring an explicit @brief command for a brief description.)
-
-JAVADOC_AUTOBRIEF = NO
-
-# If the QT_AUTOBRIEF tag is set to YES then Doxygen will
-# interpret the first line (until the first dot) of a Qt-style
-# comment as the brief description. If set to NO, the comments
-# will behave just like regular Qt-style comments (thus requiring
-# an explicit \brief command for a brief description.)
-
-QT_AUTOBRIEF = NO
-
-# The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make Doxygen
-# treat a multi-line C++ special comment block (i.e. a block of //! or ///
-# comments) as a brief description. This used to be the default behaviour.
-# The new default is to treat a multi-line C++ comment block as a detailed
-# description. Set this tag to YES if you prefer the old behaviour instead.
-
-MULTILINE_CPP_IS_BRIEF = NO
-
-# If the INHERIT_DOCS tag is set to YES (the default) then an undocumented
-# member inherits the documentation from any documented member that it
-# re-implements.
-
-INHERIT_DOCS = YES
-
-# If the SEPARATE_MEMBER_PAGES tag is set to YES, then doxygen will produce
-# a new page for each member. If set to NO, the documentation of a member will
-# be part of the file/class/namespace that contains it.
-
-SEPARATE_MEMBER_PAGES = NO
-
-# The TAB_SIZE tag can be used to set the number of spaces in a tab.
-# Doxygen uses this value to replace tabs by spaces in code fragments.
-
-TAB_SIZE = 8
-
-# This tag can be used to specify a number of aliases that acts
-# as commands in the documentation. An alias has the form "name=value".
-# For example adding "sideeffect=\par Side Effects:\n" will allow you to
-# put the command \sideeffect (or @sideeffect) in the documentation, which
-# will result in a user-defined paragraph with heading "Side Effects:".
-# You can put \n's in the value part of an alias to insert newlines.
-
-ALIASES =
-
-# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C
-# sources only. Doxygen will then generate output that is more tailored for C.
-# For instance, some of the names that are used will be different. The list
-# of all members will be omitted, etc.
-
-OPTIMIZE_OUTPUT_FOR_C = NO
-
-# Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java
-# sources only. Doxygen will then generate output that is more tailored for
-# Java. For instance, namespaces will be presented as packages, qualified
-# scopes will look different, etc.
-
-OPTIMIZE_OUTPUT_JAVA = NO
-
-# Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran
-# sources only. Doxygen will then generate output that is more tailored for
-# Fortran.
-
-OPTIMIZE_FOR_FORTRAN = NO
-
-# Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL
-# sources. Doxygen will then generate output that is tailored for
-# VHDL.
-
-OPTIMIZE_OUTPUT_VHDL = NO
-
-# Doxygen selects the parser to use depending on the extension of the files it parses.
-# With this tag you can assign which parser to use for a given extension.
-# Doxygen has a built-in mapping, but you can override or extend it using this tag.
-# The format is ext=language, where ext is a file extension, and language is one of
-# the parsers supported by doxygen: IDL, Java, Javascript, C#, C, C++, D, PHP,
-# Objective-C, Python, Fortran, VHDL, C, C++. For instance to make doxygen treat
-# .inc files as Fortran files (default is PHP), and .f files as C (default is Fortran),
-# use: inc=Fortran f=C. Note that for custom extensions you also need to set FILE_PATTERNS otherwise the files are not read by doxygen.
-
-EXTENSION_MAPPING =
-
-# If you use STL classes (i.e. std::string, std::vector, etc.) but do not want
-# to include (a tag file for) the STL sources as input, then you should
-# set this tag to YES in order to let doxygen match functions declarations and
-# definitions whose arguments contain STL classes (e.g. func(std::string); v.s.
-# func(std::string) {}). This also make the inheritance and collaboration
-# diagrams that involve STL classes more complete and accurate.
-
-BUILTIN_STL_SUPPORT = YES
-
-# If you use Microsoft's C++/CLI language, you should set this option to YES to
-# enable parsing support.
-
-CPP_CLI_SUPPORT = NO
-
-# Set the SIP_SUPPORT tag to YES if your project consists of sip sources only.
-# Doxygen will parse them like normal C++ but will assume all classes use public
-# instead of private inheritance when no explicit protection keyword is present.
-
-SIP_SUPPORT = NO
-
-# For Microsoft's IDL there are propget and propput attributes to indicate getter
-# and setter methods for a property. Setting this option to YES (the default)
-# will make doxygen to replace the get and set methods by a property in the
-# documentation. This will only work if the methods are indeed getting or
-# setting a simple type. If this is not the case, or you want to show the
-# methods anyway, you should set this option to NO.
-
-IDL_PROPERTY_SUPPORT = YES
-
-# If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC
-# tag is set to YES, then doxygen will reuse the documentation of the first
-# member in the group (if any) for the other members of the group. By default
-# all members of a group must be documented explicitly.
-
-DISTRIBUTE_GROUP_DOC = NO
-
-# Set the SUBGROUPING tag to YES (the default) to allow class member groups of
-# the same type (for instance a group of public functions) to be put as a
-# subgroup of that type (e.g. under the Public Functions section). Set it to
-# NO to prevent subgrouping. Alternatively, this can be done per class using
-# the \nosubgrouping command.
-
-SUBGROUPING = YES
-
-# When TYPEDEF_HIDES_STRUCT is enabled, a typedef of a struct, union, or enum
-# is documented as struct, union, or enum with the name of the typedef. So
-# typedef struct TypeS {} TypeT, will appear in the documentation as a struct
-# with name TypeT. When disabled the typedef will appear as a member of a file,
-# namespace, or class. And the struct will be named TypeS. This can typically
-# be useful for C code in case the coding convention dictates that all compound
-# types are typedef'ed and only the typedef is referenced, never the tag name.
-
-TYPEDEF_HIDES_STRUCT = NO
-
-# The SYMBOL_CACHE_SIZE determines the size of the internal cache use to
-# determine which symbols to keep in memory and which to flush to disk.
-# When the cache is full, less often used symbols will be written to disk.
-# For small to medium size projects (<1000 input files) the default value is
-# probably good enough. For larger projects a too small cache size can cause
-# doxygen to be busy swapping symbols to and from disk most of the time
-# causing a significant performance penality.
-# If the system has enough physical memory increasing the cache will improve the
-# performance by keeping more symbols in memory. Note that the value works on
-# a logarithmic scale so increasing the size by one will rougly double the
-# memory usage. The cache size is given by this formula:
-# 2^(16+SYMBOL_CACHE_SIZE). The valid range is 0..9, the default is 0,
-# corresponding to a cache size of 2^16 = 65536 symbols
-
-SYMBOL_CACHE_SIZE = 0
-
-#---------------------------------------------------------------------------
-# Build related configuration options
-#---------------------------------------------------------------------------
-
-# If the EXTRACT_ALL tag is set to YES doxygen will assume all entities in
-# documentation are documented, even if no documentation was available.
-# Private class members and static file members will be hidden unless
-# the EXTRACT_PRIVATE and EXTRACT_STATIC tags are set to YES
-
-EXTRACT_ALL = YES
-
-# If the EXTRACT_PRIVATE tag is set to YES all private members of a class
-# will be included in the documentation.
-
-EXTRACT_PRIVATE = NO
-
-# If the EXTRACT_STATIC tag is set to YES all static members of a file
-# will be included in the documentation.
-
-EXTRACT_STATIC = NO
-
-# If the EXTRACT_LOCAL_CLASSES tag is set to YES classes (and structs)
-# defined locally in source files will be included in the documentation.
-# If set to NO only classes defined in header files are included.
-
-EXTRACT_LOCAL_CLASSES = YES
-
-# This flag is only useful for Objective-C code. When set to YES local
-# methods, which are defined in the implementation section but not in
-# the interface are included in the documentation.
-# If set to NO (the default) only methods in the interface are included.
-
-EXTRACT_LOCAL_METHODS = NO
-
-# If this flag is set to YES, the members of anonymous namespaces will be
-# extracted and appear in the documentation as a namespace called
-# 'anonymous_namespace{file}', where file will be replaced with the base
-# name of the file that contains the anonymous namespace. By default
-# anonymous namespace are hidden.
-
-EXTRACT_ANON_NSPACES = NO
-
-# If the HIDE_UNDOC_MEMBERS tag is set to YES, Doxygen will hide all
-# undocumented members of documented classes, files or namespaces.
-# If set to NO (the default) these members will be included in the
-# various overviews, but no documentation section is generated.
-# This option has no effect if EXTRACT_ALL is enabled.
-
-HIDE_UNDOC_MEMBERS = NO
-
-# If the HIDE_UNDOC_CLASSES tag is set to YES, Doxygen will hide all
-# undocumented classes that are normally visible in the class hierarchy.
-# If set to NO (the default) these classes will be included in the various
-# overviews. This option has no effect if EXTRACT_ALL is enabled.
-
-HIDE_UNDOC_CLASSES = NO
-
-# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, Doxygen will hide all
-# friend (class|struct|union) declarations.
-# If set to NO (the default) these declarations will be included in the
-# documentation.
-
-HIDE_FRIEND_COMPOUNDS = NO
-
-# If the HIDE_IN_BODY_DOCS tag is set to YES, Doxygen will hide any
-# documentation blocks found inside the body of a function.
-# If set to NO (the default) these blocks will be appended to the
-# function's detailed documentation block.
-
-HIDE_IN_BODY_DOCS = NO
-
-# The INTERNAL_DOCS tag determines if documentation
-# that is typed after a \internal command is included. If the tag is set
-# to NO (the default) then the documentation will be excluded.
-# Set it to YES to include the internal documentation.
-
-INTERNAL_DOCS = NO
-
-# If the CASE_SENSE_NAMES tag is set to NO then Doxygen will only generate
-# file names in lower-case letters. If set to YES upper-case letters are also
-# allowed. This is useful if you have classes or files whose names only differ
-# in case and if your file system supports case sensitive file names. Windows
-# and Mac users are advised to set this option to NO.
-
-CASE_SENSE_NAMES = YES
-
-# If the HIDE_SCOPE_NAMES tag is set to NO (the default) then Doxygen
-# will show members with their full class and namespace scopes in the
-# documentation. If set to YES the scope will be hidden.
-
-HIDE_SCOPE_NAMES = NO
-
-# If the SHOW_INCLUDE_FILES tag is set to YES (the default) then Doxygen
-# will put a list of the files that are included by a file in the documentation
-# of that file.
-
-SHOW_INCLUDE_FILES = YES
-
-# If the INLINE_INFO tag is set to YES (the default) then a tag [inline]
-# is inserted in the documentation for inline members.
-
-INLINE_INFO = YES
-
-# If the SORT_MEMBER_DOCS tag is set to YES (the default) then doxygen
-# will sort the (detailed) documentation of file and class members
-# alphabetically by member name. If set to NO the members will appear in
-# declaration order.
-
-SORT_MEMBER_DOCS = YES
-
-# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the
-# brief documentation of file, namespace and class members alphabetically
-# by member name. If set to NO (the default) the members will appear in
-# declaration order.
-
-SORT_BRIEF_DOCS = NO
-
-# If the SORT_MEMBERS_CTORS_1ST tag is set to YES then doxygen will sort the (brief and detailed) documentation of class members so that constructors and destructors are listed first. If set to NO (the default) the constructors will appear in the respective orders defined by SORT_MEMBER_DOCS and SORT_BRIEF_DOCS. This tag will be ignored for brief docs if SORT_BRIEF_DOCS is set to NO and ignored for detailed docs if SORT_MEMBER_DOCS is set to NO.
-
-SORT_MEMBERS_CTORS_1ST = NO
-
-# If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the
-# hierarchy of group names into alphabetical order. If set to NO (the default)
-# the group names will appear in their defined order.
-
-SORT_GROUP_NAMES = NO
-
-# If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be
-# sorted by fully-qualified names, including namespaces. If set to
-# NO (the default), the class list will be sorted only by class name,
-# not including the namespace part.
-# Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES.
-# Note: This option applies only to the class list, not to the
-# alphabetical list.
-
-SORT_BY_SCOPE_NAME = NO
-
-# The GENERATE_TODOLIST tag can be used to enable (YES) or
-# disable (NO) the todo list. This list is created by putting \todo
-# commands in the documentation.
-
-GENERATE_TODOLIST = YES
-
-# The GENERATE_TESTLIST tag can be used to enable (YES) or
-# disable (NO) the test list. This list is created by putting \test
-# commands in the documentation.
-
-GENERATE_TESTLIST = YES
-
-# The GENERATE_BUGLIST tag can be used to enable (YES) or
-# disable (NO) the bug list. This list is created by putting \bug
-# commands in the documentation.
-
-GENERATE_BUGLIST = YES
-
-# The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or
-# disable (NO) the deprecated list. This list is created by putting
-# \deprecated commands in the documentation.
-
-GENERATE_DEPRECATEDLIST= YES
-
-# The ENABLED_SECTIONS tag can be used to enable conditional
-# documentation sections, marked by \if sectionname ... \endif.
-
-ENABLED_SECTIONS =
-
-# The MAX_INITIALIZER_LINES tag determines the maximum number of lines
-# the initial value of a variable or define consists of for it to appear in
-# the documentation. If the initializer consists of more lines than specified
-# here it will be hidden. Use a value of 0 to hide initializers completely.
-# The appearance of the initializer of individual variables and defines in the
-# documentation can be controlled using \showinitializer or \hideinitializer
-# command in the documentation regardless of this setting.
-
-MAX_INITIALIZER_LINES = 30
-
-# Set the SHOW_USED_FILES tag to NO to disable the list of files generated
-# at the bottom of the documentation of classes and structs. If set to YES the
-# list will mention the files that were used to generate the documentation.
-
-SHOW_USED_FILES = YES
-
-# If the sources in your project are distributed over multiple directories
-# then setting the SHOW_DIRECTORIES tag to YES will show the directory hierarchy
-# in the documentation. The default is NO.
-
-SHOW_DIRECTORIES = NO
-
-# Set the SHOW_FILES tag to NO to disable the generation of the Files page.
-# This will remove the Files entry from the Quick Index and from the
-# Folder Tree View (if specified). The default is YES.
-
-SHOW_FILES = YES
-
-# Set the SHOW_NAMESPACES tag to NO to disable the generation of the
-# Namespaces page.
-# This will remove the Namespaces entry from the Quick Index
-# and from the Folder Tree View (if specified). The default is YES.
-
-SHOW_NAMESPACES = YES
-
-# The FILE_VERSION_FILTER tag can be used to specify a program or script that
-# doxygen should invoke to get the current version for each file (typically from
-# the version control system). Doxygen will invoke the program by executing (via
-# popen()) the command , where is the value of
-# the FILE_VERSION_FILTER tag, and is the name of an input file
-# provided by doxygen. Whatever the program writes to standard output
-# is used as the file version. See the manual for examples.
-
-FILE_VERSION_FILTER =
-
-# The LAYOUT_FILE tag can be used to specify a layout file which will be parsed by
-# doxygen. The layout file controls the global structure of the generated output files
-# in an output format independent way. The create the layout file that represents
-# doxygen's defaults, run doxygen with the -l option. You can optionally specify a
-# file name after the option, if omitted DoxygenLayout.xml will be used as the name
-# of the layout file.
-
-LAYOUT_FILE =
-
-#---------------------------------------------------------------------------
-# configuration options related to warning and progress messages
-#---------------------------------------------------------------------------
-
-# The QUIET tag can be used to turn on/off the messages that are generated
-# by doxygen. Possible values are YES and NO. If left blank NO is used.
-
-QUIET = YES
-
-# The WARNINGS tag can be used to turn on/off the warning messages that are
-# generated by doxygen. Possible values are YES and NO. If left blank
-# NO is used.
-
-WARNINGS = YES
-
-# If WARN_IF_UNDOCUMENTED is set to YES, then doxygen will generate warnings
-# for undocumented members. If EXTRACT_ALL is set to YES then this flag will
-# automatically be disabled.
-
-WARN_IF_UNDOCUMENTED = YES
-
-# If WARN_IF_DOC_ERROR is set to YES, doxygen will generate warnings for
-# potential errors in the documentation, such as not documenting some
-# parameters in a documented function, or documenting parameters that
-# don't exist or using markup commands wrongly.
-
-WARN_IF_DOC_ERROR = YES
-
-# This WARN_NO_PARAMDOC option can be abled to get warnings for
-# functions that are documented, but have no documentation for their parameters
-# or return value. If set to NO (the default) doxygen will only warn about
-# wrong or incomplete parameter documentation, but not about the absence of
-# documentation.
-
-WARN_NO_PARAMDOC = NO
-
-# The WARN_FORMAT tag determines the format of the warning messages that
-# doxygen can produce. The string should contain the $file, $line, and $text
-# tags, which will be replaced by the file and line number from which the
-# warning originated and the warning text. Optionally the format may contain
-# $version, which will be replaced by the version of the file (if it could
-# be obtained via FILE_VERSION_FILTER)
-
-WARN_FORMAT = "$file:$line: $text"
-
-# The WARN_LOGFILE tag can be used to specify a file to which warning
-# and error messages should be written. If left blank the output is written
-# to stderr.
-
-WARN_LOGFILE =
-
-#---------------------------------------------------------------------------
-# configuration options related to the input files
-#---------------------------------------------------------------------------
-
-# The INPUT tag can be used to specify the files and/or directories that contain
-# documented source files. You may enter file names like "myfile.cpp" or
-# directories like "/usr/src/myproject". Separate the files or directories
-# with spaces.
-
-INPUT = @INPUT_PATHS@
-
-# This tag can be used to specify the character encoding of the source files
-# that doxygen parses. Internally doxygen uses the UTF-8 encoding, which is
-# also the default input encoding. Doxygen uses libiconv (or the iconv built
-# into libc) for the transcoding. See http://www.gnu.org/software/libiconv for
-# the list of possible encodings.
-
-INPUT_ENCODING = UTF-8
-
-# If the value of the INPUT tag contains directories, you can use the
-# FILE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp
-# and *.h) to filter out the source-files in the directories. If left
-# blank the following patterns are tested:
-# *.c *.cc *.cxx *.cpp *.c++ *.java *.ii *.ixx *.ipp *.i++ *.inl *.h *.hh *.hxx
-# *.hpp *.h++ *.idl *.odl *.cs *.php *.php3 *.inc *.m *.mm *.py *.f90
-
-FILE_PATTERNS = *.h
-
-# The RECURSIVE tag can be used to turn specify whether or not subdirectories
-# should be searched for input files as well. Possible values are YES and NO.
-# If left blank NO is used.
-
-RECURSIVE = YES
-
-# The EXCLUDE tag can be used to specify files and/or directories that should
-# excluded from the INPUT source files. This way you can easily exclude a
-# subdirectory from a directory tree whose root is specified with the INPUT tag.
-
-EXCLUDE =
-
-# The EXCLUDE_SYMLINKS tag can be used select whether or not files or
-# directories that are symbolic links (a Unix filesystem feature) are excluded
-# from the input.
-
-EXCLUDE_SYMLINKS = NO
-
-# If the value of the INPUT tag contains directories, you can use the
-# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude
-# certain files from those directories. Note that the wildcards are matched
-# against the file with absolute path, so to exclude all test directories
-# for example use the pattern */test/*
-
-EXCLUDE_PATTERNS =
-
-# The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names
-# (namespaces, classes, functions, etc.) that should be excluded from the
-# output. The symbol name can be a fully qualified name, a word, or if the
-# wildcard * is used, a substring. Examples: ANamespace, AClass,
-# AClass::ANamespace, ANamespace::*Test
-
-EXCLUDE_SYMBOLS =
-
-# The EXAMPLE_PATH tag can be used to specify one or more files or
-# directories that contain example code fragments that are included (see
-# the \include command).
-
-EXAMPLE_PATH =
-
-# If the value of the EXAMPLE_PATH tag contains directories, you can use the
-# EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp
-# and *.h) to filter out the source-files in the directories. If left
-# blank all files are included.
-
-EXAMPLE_PATTERNS =
-
-# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be
-# searched for input files to be used with the \include or \dontinclude
-# commands irrespective of the value of the RECURSIVE tag.
-# Possible values are YES and NO. If left blank NO is used.
-
-EXAMPLE_RECURSIVE = NO
-
-# The IMAGE_PATH tag can be used to specify one or more files or
-# directories that contain image that are included in the documentation (see
-# the \image command).
-
-IMAGE_PATH =
-
-# The INPUT_FILTER tag can be used to specify a program that doxygen should
-# invoke to filter for each input file. Doxygen will invoke the filter program
-# by executing (via popen()) the command , where
-# is the value of the INPUT_FILTER tag, and is the name of an
-# input file. Doxygen will then use the output that the filter program writes
-# to standard output.
-# If FILTER_PATTERNS is specified, this tag will be
-# ignored.
-
-INPUT_FILTER =
-
-# The FILTER_PATTERNS tag can be used to specify filters on a per file pattern
-# basis.
-# Doxygen will compare the file name with each pattern and apply the
-# filter if there is a match.
-# The filters are a list of the form:
-# pattern=filter (like *.cpp=my_cpp_filter). See INPUT_FILTER for further
-# info on how filters are used. If FILTER_PATTERNS is empty, INPUT_FILTER
-# is applied to all files.
-
-FILTER_PATTERNS =
-
-# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using
-# INPUT_FILTER) will be used to filter the input files when producing source
-# files to browse (i.e. when SOURCE_BROWSER is set to YES).
-
-FILTER_SOURCE_FILES = NO
-
-#---------------------------------------------------------------------------
-# configuration options related to source browsing
-#---------------------------------------------------------------------------
-
-# If the SOURCE_BROWSER tag is set to YES then a list of source files will
-# be generated. Documented entities will be cross-referenced with these sources.
-# Note: To get rid of all source code in the generated output, make sure also
-# VERBATIM_HEADERS is set to NO.
-
-SOURCE_BROWSER = NO
-
-# Setting the INLINE_SOURCES tag to YES will include the body
-# of functions and classes directly in the documentation.
-
-INLINE_SOURCES = NO
-
-# Setting the STRIP_CODE_COMMENTS tag to YES (the default) will instruct
-# doxygen to hide any special comment blocks from generated source code
-# fragments. Normal C and C++ comments will always remain visible.
-
-STRIP_CODE_COMMENTS = YES
-
-# If the REFERENCED_BY_RELATION tag is set to YES
-# then for each documented function all documented
-# functions referencing it will be listed.
-
-REFERENCED_BY_RELATION = NO
-
-# If the REFERENCES_RELATION tag is set to YES
-# then for each documented function all documented entities
-# called/used by that function will be listed.
-
-REFERENCES_RELATION = NO
-
-# If the REFERENCES_LINK_SOURCE tag is set to YES (the default)
-# and SOURCE_BROWSER tag is set to YES, then the hyperlinks from
-# functions in REFERENCES_RELATION and REFERENCED_BY_RELATION lists will
-# link to the source code.
-# Otherwise they will link to the documentation.
-
-REFERENCES_LINK_SOURCE = YES
-
-# If the USE_HTAGS tag is set to YES then the references to source code
-# will point to the HTML generated by the htags(1) tool instead of doxygen
-# built-in source browser. The htags tool is part of GNU's global source
-# tagging system (see http://www.gnu.org/software/global/global.html). You
-# will need version 4.8.6 or higher.
-
-USE_HTAGS = NO
-
-# If the VERBATIM_HEADERS tag is set to YES (the default) then Doxygen
-# will generate a verbatim copy of the header file for each class for
-# which an include is specified. Set to NO to disable this.
-
-VERBATIM_HEADERS = YES
-
-#---------------------------------------------------------------------------
-# configuration options related to the alphabetical class index
-#---------------------------------------------------------------------------
-
-# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index
-# of all compounds will be generated. Enable this if the project
-# contains a lot of classes, structs, unions or interfaces.
-
-ALPHABETICAL_INDEX = NO
-
-# If the alphabetical index is enabled (see ALPHABETICAL_INDEX) then
-# the COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns
-# in which this list will be split (can be a number in the range [1..20])
-
-COLS_IN_ALPHA_INDEX = 5
-
-# In case all classes in a project start with a common prefix, all
-# classes will be put under the same header in the alphabetical index.
-# The IGNORE_PREFIX tag can be used to specify one or more prefixes that
-# should be ignored while generating the index headers.
-
-IGNORE_PREFIX =
-
-#---------------------------------------------------------------------------
-# configuration options related to the HTML output
-#---------------------------------------------------------------------------
-
-# If the GENERATE_HTML tag is set to YES (the default) Doxygen will
-# generate HTML output.
-
-GENERATE_HTML = NO
-
-# The HTML_OUTPUT tag is used to specify where the HTML docs will be put.
-# If a relative path is entered the value of OUTPUT_DIRECTORY will be
-# put in front of it. If left blank `html' will be used as the default path.
-
-HTML_OUTPUT = html
-
-# The HTML_FILE_EXTENSION tag can be used to specify the file extension for
-# each generated HTML page (for example: .htm,.php,.asp). If it is left blank
-# doxygen will generate files with .html extension.
-
-HTML_FILE_EXTENSION = .html
-
-# The HTML_HEADER tag can be used to specify a personal HTML header for
-# each generated HTML page. If it is left blank doxygen will generate a
-# standard header.
-
-HTML_HEADER =
-
-# The HTML_FOOTER tag can be used to specify a personal HTML footer for
-# each generated HTML page. If it is left blank doxygen will generate a
-# standard footer.
-
-HTML_FOOTER =
-
-# The HTML_STYLESHEET tag can be used to specify a user-defined cascading
-# style sheet that is used by each HTML page. It can be used to
-# fine-tune the look of the HTML output. If the tag is left blank doxygen
-# will generate a default style sheet. Note that doxygen will try to copy
-# the style sheet file to the HTML output directory, so don't put your own
-# stylesheet in the HTML output directory as well, or it will be erased!
-
-HTML_STYLESHEET =
-
-# If the HTML_ALIGN_MEMBERS tag is set to YES, the members of classes,
-# files or namespaces will be aligned in HTML using tables. If set to
-# NO a bullet list will be used.
-
-HTML_ALIGN_MEMBERS = YES
-
-# If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML
-# documentation will contain sections that can be hidden and shown after the
-# page has loaded. For this to work a browser that supports
-# JavaScript and DHTML is required (for instance Mozilla 1.0+, Firefox
-# Netscape 6.0+, Internet explorer 5.0+, Konqueror, or Safari).
-
-HTML_DYNAMIC_SECTIONS = NO
-
-# If the GENERATE_DOCSET tag is set to YES, additional index files
-# will be generated that can be used as input for Apple's Xcode 3
-# integrated development environment, introduced with OSX 10.5 (Leopard).
-# To create a documentation set, doxygen will generate a Makefile in the
-# HTML output directory. Running make will produce the docset in that
-# directory and running "make install" will install the docset in
-# ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find
-# it at startup.
-# See http://developer.apple.com/tools/creatingdocsetswithdoxygen.html for more information.
-
-GENERATE_DOCSET = NO
-
-# When GENERATE_DOCSET tag is set to YES, this tag determines the name of the
-# feed. A documentation feed provides an umbrella under which multiple
-# documentation sets from a single provider (such as a company or product suite)
-# can be grouped.
-
-DOCSET_FEEDNAME = "Doxygen generated docs"
-
-# When GENERATE_DOCSET tag is set to YES, this tag specifies a string that
-# should uniquely identify the documentation set bundle. This should be a
-# reverse domain-name style string, e.g. com.mycompany.MyDocSet. Doxygen
-# will append .docset to the name.
-
-DOCSET_BUNDLE_ID = org.doxygen.Project
-
-# If the GENERATE_HTMLHELP tag is set to YES, additional index files
-# will be generated that can be used as input for tools like the
-# Microsoft HTML help workshop to generate a compiled HTML help file (.chm)
-# of the generated HTML documentation.
-
-GENERATE_HTMLHELP = NO
-
-# If the GENERATE_HTMLHELP tag is set to YES, the CHM_FILE tag can
-# be used to specify the file name of the resulting .chm file. You
-# can add a path in front of the file if the result should not be
-# written to the html output directory.
-
-CHM_FILE =
-
-# If the GENERATE_HTMLHELP tag is set to YES, the HHC_LOCATION tag can
-# be used to specify the location (absolute path including file name) of
-# the HTML help compiler (hhc.exe). If non-empty doxygen will try to run
-# the HTML help compiler on the generated index.hhp.
-
-HHC_LOCATION =
-
-# If the GENERATE_HTMLHELP tag is set to YES, the GENERATE_CHI flag
-# controls if a separate .chi index file is generated (YES) or that
-# it should be included in the master .chm file (NO).
-
-GENERATE_CHI = NO
-
-# If the GENERATE_HTMLHELP tag is set to YES, the CHM_INDEX_ENCODING
-# is used to encode HtmlHelp index (hhk), content (hhc) and project file
-# content.
-
-CHM_INDEX_ENCODING =
-
-# If the GENERATE_HTMLHELP tag is set to YES, the BINARY_TOC flag
-# controls whether a binary table of contents is generated (YES) or a
-# normal table of contents (NO) in the .chm file.
-
-BINARY_TOC = NO
-
-# The TOC_EXPAND flag can be set to YES to add extra items for group members
-# to the contents of the HTML help documentation and to the tree view.
-
-TOC_EXPAND = NO
-
-# If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and QHP_VIRTUAL_FOLDER
-# are set, an additional index file will be generated that can be used as input for
-# Qt's qhelpgenerator to generate a Qt Compressed Help (.qch) of the generated
-# HTML documentation.
-
-GENERATE_QHP = NO
-
-# If the QHG_LOCATION tag is specified, the QCH_FILE tag can
-# be used to specify the file name of the resulting .qch file.
-# The path specified is relative to the HTML output folder.
-
-QCH_FILE =
-
-# The QHP_NAMESPACE tag specifies the namespace to use when generating
-# Qt Help Project output. For more information please see
-# http://doc.trolltech.com/qthelpproject.html#namespace
-
-QHP_NAMESPACE =
-
-# The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating
-# Qt Help Project output. For more information please see
-# http://doc.trolltech.com/qthelpproject.html#virtual-folders
-
-QHP_VIRTUAL_FOLDER = doc
-
-# If QHP_CUST_FILTER_NAME is set, it specifies the name of a custom filter to add.
-# For more information please see
-# http://doc.trolltech.com/qthelpproject.html#custom-filters
-
-QHP_CUST_FILTER_NAME =
-
-# The QHP_CUST_FILT_ATTRS tag specifies the list of the attributes of the custom filter to add.For more information please see
-# Qt Help Project / Custom Filters.
-
-QHP_CUST_FILTER_ATTRS =
-
-# The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this project's
-# filter section matches.
-# Qt Help Project / Filter Attributes.
-
-QHP_SECT_FILTER_ATTRS =
-
-# If the GENERATE_QHP tag is set to YES, the QHG_LOCATION tag can
-# be used to specify the location of Qt's qhelpgenerator.
-# If non-empty doxygen will try to run qhelpgenerator on the generated
-# .qhp file.
-
-QHG_LOCATION =
-
-# The DISABLE_INDEX tag can be used to turn on/off the condensed index at
-# top of each HTML page. The value NO (the default) enables the index and
-# the value YES disables it.
-
-DISABLE_INDEX = NO
-
-# This tag can be used to set the number of enum values (range [1..20])
-# that doxygen will group on one line in the generated HTML documentation.
-
-ENUM_VALUES_PER_LINE = 4
-
-# The GENERATE_TREEVIEW tag is used to specify whether a tree-like index
-# structure should be generated to display hierarchical information.
-# If the tag value is set to YES, a side panel will be generated
-# containing a tree-like index structure (just like the one that
-# is generated for HTML Help). For this to work a browser that supports
-# JavaScript, DHTML, CSS and frames is required (i.e. any modern browser).
-# Windows users are probably better off using the HTML help feature.
-
-GENERATE_TREEVIEW = NO
-
-# By enabling USE_INLINE_TREES, doxygen will generate the Groups, Directories,
-# and Class Hierarchy pages using a tree view instead of an ordered list.
-
-USE_INLINE_TREES = NO
-
-# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be
-# used to set the initial width (in pixels) of the frame in which the tree
-# is shown.
-
-TREEVIEW_WIDTH = 250
-
-# Use this tag to change the font size of Latex formulas included
-# as images in the HTML documentation. The default is 10. Note that
-# when you change the font size after a successful doxygen run you need
-# to manually remove any form_*.png images from the HTML output directory
-# to force them to be regenerated.
-
-FORMULA_FONTSIZE = 10
-
-# When the SEARCHENGINE tag is enable doxygen will generate a search box for the HTML output. The underlying search engine uses javascript
-# and DHTML and should work on any modern browser. Note that when using HTML help (GENERATE_HTMLHELP) or Qt help (GENERATE_QHP)
-# there is already a search function so this one should typically
-# be disabled.
-
-SEARCHENGINE = YES
-
-#---------------------------------------------------------------------------
-# configuration options related to the LaTeX output
-#---------------------------------------------------------------------------
-
-# If the GENERATE_LATEX tag is set to YES (the default) Doxygen will
-# generate Latex output.
-
-GENERATE_LATEX = NO
-
-# The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put.
-# If a relative path is entered the value of OUTPUT_DIRECTORY will be
-# put in front of it. If left blank `latex' will be used as the default path.
-
-LATEX_OUTPUT = latex
-
-# The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be
-# invoked. If left blank `latex' will be used as the default command name.
-
-LATEX_CMD_NAME = latex
-
-# The MAKEINDEX_CMD_NAME tag can be used to specify the command name to
-# generate index for LaTeX. If left blank `makeindex' will be used as the
-# default command name.
-
-MAKEINDEX_CMD_NAME = makeindex
-
-# If the COMPACT_LATEX tag is set to YES Doxygen generates more compact
-# LaTeX documents. This may be useful for small projects and may help to
-# save some trees in general.
-
-COMPACT_LATEX = NO
-
-# The PAPER_TYPE tag can be used to set the paper type that is used
-# by the printer. Possible values are: a4, a4wide, letter, legal and
-# executive. If left blank a4wide will be used.
-
-PAPER_TYPE = a4wide
-
-# The EXTRA_PACKAGES tag can be used to specify one or more names of LaTeX
-# packages that should be included in the LaTeX output.
-
-EXTRA_PACKAGES =
-
-# The LATEX_HEADER tag can be used to specify a personal LaTeX header for
-# the generated latex document. The header should contain everything until
-# the first chapter. If it is left blank doxygen will generate a
-# standard header. Notice: only use this tag if you know what you are doing!
-
-LATEX_HEADER =
-
-# If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated
-# is prepared for conversion to pdf (using ps2pdf). The pdf file will
-# contain links (just like the HTML output) instead of page references.
-# This makes the output suitable for online browsing using a pdf viewer.
-
-PDF_HYPERLINKS = YES
-
-# If the USE_PDFLATEX tag is set to YES, pdflatex will be used instead of
-# plain latex in the generated Makefile. Set this option to YES to get a
-# higher quality PDF documentation.
-
-USE_PDFLATEX = YES
-
-# If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \\batchmode.
-# command to the generated LaTeX files. This will instruct LaTeX to keep
-# running if errors occur, instead of asking the user for help.
-# This option is also used when generating formulas in HTML.
-
-LATEX_BATCHMODE = NO
-
-# If LATEX_HIDE_INDICES is set to YES then doxygen will not
-# include the index chapters (such as File Index, Compound Index, etc.)
-# in the output.
-
-LATEX_HIDE_INDICES = NO
-
-# If LATEX_SOURCE_CODE is set to YES then doxygen will include source code
-# with syntax highlighting in the LaTeX output. Note that which sources are
-# shown also depends on other settings such as SOURCE_BROWSER.
-
-LATEX_SOURCE_CODE = NO
-
-#---------------------------------------------------------------------------
-# configuration options related to the RTF output
-#---------------------------------------------------------------------------
-
-# If the GENERATE_RTF tag is set to YES Doxygen will generate RTF output
-# The RTF output is optimized for Word 97 and may not look very pretty with
-# other RTF readers or editors.
-
-GENERATE_RTF = NO
-
-# The RTF_OUTPUT tag is used to specify where the RTF docs will be put.
-# If a relative path is entered the value of OUTPUT_DIRECTORY will be
-# put in front of it. If left blank `rtf' will be used as the default path.
-
-RTF_OUTPUT = rtf
-
-# If the COMPACT_RTF tag is set to YES Doxygen generates more compact
-# RTF documents. This may be useful for small projects and may help to
-# save some trees in general.
-
-COMPACT_RTF = NO
-
-# If the RTF_HYPERLINKS tag is set to YES, the RTF that is generated
-# will contain hyperlink fields. The RTF file will
-# contain links (just like the HTML output) instead of page references.
-# This makes the output suitable for online browsing using WORD or other
-# programs which support those fields.
-# Note: wordpad (write) and others do not support links.
-
-RTF_HYPERLINKS = NO
-
-# Load stylesheet definitions from file. Syntax is similar to doxygen's
-# config file, i.e. a series of assignments. You only have to provide
-# replacements, missing definitions are set to their default value.
-
-RTF_STYLESHEET_FILE =
-
-# Set optional variables used in the generation of an rtf document.
-# Syntax is similar to doxygen's config file.
-
-RTF_EXTENSIONS_FILE =
-
-#---------------------------------------------------------------------------
-# configuration options related to the man page output
-#---------------------------------------------------------------------------
-
-# If the GENERATE_MAN tag is set to YES (the default) Doxygen will
-# generate man pages
-
-GENERATE_MAN = NO
-
-# The MAN_OUTPUT tag is used to specify where the man pages will be put.
-# If a relative path is entered the value of OUTPUT_DIRECTORY will be
-# put in front of it. If left blank `man' will be used as the default path.
-
-MAN_OUTPUT = man
-
-# The MAN_EXTENSION tag determines the extension that is added to
-# the generated man pages (default is the subroutine's section .3)
-
-MAN_EXTENSION = .3
-
-# If the MAN_LINKS tag is set to YES and Doxygen generates man output,
-# then it will generate one additional man file for each entity
-# documented in the real man page(s). These additional files
-# only source the real man page, but without them the man command
-# would be unable to find the correct page. The default is NO.
-
-MAN_LINKS = NO
-
-#---------------------------------------------------------------------------
-# configuration options related to the XML output
-#---------------------------------------------------------------------------
-
-# If the GENERATE_XML tag is set to YES Doxygen will
-# generate an XML file that captures the structure of
-# the code including all documentation.
-
-GENERATE_XML = YES
-
-# The XML_OUTPUT tag is used to specify where the XML pages will be put.
-# If a relative path is entered the value of OUTPUT_DIRECTORY will be
-# put in front of it. If left blank `xml' will be used as the default path.
-
-XML_OUTPUT = xml
-
-# The XML_SCHEMA tag can be used to specify an XML schema,
-# which can be used by a validating XML parser to check the
-# syntax of the XML files.
-
-XML_SCHEMA =
-
-# The XML_DTD tag can be used to specify an XML DTD,
-# which can be used by a validating XML parser to check the
-# syntax of the XML files.
-
-XML_DTD =
-
-# If the XML_PROGRAMLISTING tag is set to YES Doxygen will
-# dump the program listings (including syntax highlighting
-# and cross-referencing information) to the XML output. Note that
-# enabling this will significantly increase the size of the XML output.
-
-XML_PROGRAMLISTING = YES
-
-#---------------------------------------------------------------------------
-# configuration options for the AutoGen Definitions output
-#---------------------------------------------------------------------------
-
-# If the GENERATE_AUTOGEN_DEF tag is set to YES Doxygen will
-# generate an AutoGen Definitions (see autogen.sf.net) file
-# that captures the structure of the code including all
-# documentation. Note that this feature is still experimental
-# and incomplete at the moment.
-
-GENERATE_AUTOGEN_DEF = NO
-
-#---------------------------------------------------------------------------
-# configuration options related to the Perl module output
-#---------------------------------------------------------------------------
-
-# If the GENERATE_PERLMOD tag is set to YES Doxygen will
-# generate a Perl module file that captures the structure of
-# the code including all documentation. Note that this
-# feature is still experimental and incomplete at the
-# moment.
-
-GENERATE_PERLMOD = NO
-
-# If the PERLMOD_LATEX tag is set to YES Doxygen will generate
-# the necessary Makefile rules, Perl scripts and LaTeX code to be able
-# to generate PDF and DVI output from the Perl module output.
-
-PERLMOD_LATEX = NO
-
-# If the PERLMOD_PRETTY tag is set to YES the Perl module output will be
-# nicely formatted so it can be parsed by a human reader. This is useful
-# if you want to understand what is going on. On the other hand, if this
-# tag is set to NO the size of the Perl module output will be much smaller
-# and Perl will parse it just the same.
-
-PERLMOD_PRETTY = YES
-
-# The names of the make variables in the generated doxyrules.make file
-# are prefixed with the string contained in PERLMOD_MAKEVAR_PREFIX.
-# This is useful so different doxyrules.make files included by the same
-# Makefile don't overwrite each other's variables.
-
-PERLMOD_MAKEVAR_PREFIX =
-
-#---------------------------------------------------------------------------
-# Configuration options related to the preprocessor
-#---------------------------------------------------------------------------
-
-# If the ENABLE_PREPROCESSING tag is set to YES (the default) Doxygen will
-# evaluate all C-preprocessor directives found in the sources and include
-# files.
-
-ENABLE_PREPROCESSING = YES
-
-# If the MACRO_EXPANSION tag is set to YES Doxygen will expand all macro
-# names in the source code. If set to NO (the default) only conditional
-# compilation will be performed. Macro expansion can be done in a controlled
-# way by setting EXPAND_ONLY_PREDEF to YES.
-
-MACRO_EXPANSION = YES
-
-# If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES
-# then the macro expansion is limited to the macros specified with the
-# PREDEFINED and EXPAND_AS_DEFINED tags.
-
-EXPAND_ONLY_PREDEF = NO
-
-# If the SEARCH_INCLUDES tag is set to YES (the default) the includes files
-# in the INCLUDE_PATH (see below) will be searched if a #include is found.
-
-SEARCH_INCLUDES = YES
-
-# The INCLUDE_PATH tag can be used to specify one or more directories that
-# contain include files that are not input files but should be processed by
-# the preprocessor.
-
-INCLUDE_PATH =
-
-# You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard
-# patterns (like *.h and *.hpp) to filter out the header-files in the
-# directories. If left blank, the patterns specified with FILE_PATTERNS will
-# be used.
-
-INCLUDE_FILE_PATTERNS =
-
-# The PREDEFINED tag can be used to specify one or more macro names that
-# are defined before the preprocessor is started (similar to the -D option of
-# gcc). The argument of the tag is a list of macros of the form: name
-# or name=definition (no spaces). If the definition and the = are
-# omitted =1 is assumed. To prevent a macro definition from being
-# undefined via #undef or recursively expanded use the := operator
-# instead of the = operator.
-
-PREDEFINED =
-
-# If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then
-# this tag can be used to specify a list of macro names that should be expanded.
-# The macro definition that is found in the sources will be used.
-# Use the PREDEFINED tag if you want to use a different macro definition.
-
-EXPAND_AS_DEFINED =
-
-# If the SKIP_FUNCTION_MACROS tag is set to YES (the default) then
-# doxygen's preprocessor will remove all function-like macros that are alone
-# on a line, have an all uppercase name, and do not end with a semicolon. Such
-# function macros are typically used for boiler-plate code, and will confuse
-# the parser if not removed.
-
-SKIP_FUNCTION_MACROS = YES
-
-#---------------------------------------------------------------------------
-# Configuration::additions related to external references
-#---------------------------------------------------------------------------
-
-# The TAGFILES option can be used to specify one or more tagfiles.
-# Optionally an initial location of the external documentation
-# can be added for each tagfile. The format of a tag file without
-# this location is as follows:
-#
-# TAGFILES = file1 file2 ...
-# Adding location for the tag files is done as follows:
-#
-# TAGFILES = file1=loc1 "file2 = loc2" ...
-# where "loc1" and "loc2" can be relative or absolute paths or
-# URLs. If a location is present for each tag, the installdox tool
-# does not have to be run to correct the links.
-# Note that each tag file must have a unique name
-# (where the name does NOT include the path)
-# If a tag file is not located in the directory in which doxygen
-# is run, you must also specify the path to the tagfile here.
-
-TAGFILES =
-
-# When a file name is specified after GENERATE_TAGFILE, doxygen will create
-# a tag file that is based on the input files it reads.
-
-GENERATE_TAGFILE =
-
-# If the ALLEXTERNALS tag is set to YES all external classes will be listed
-# in the class index. If set to NO only the inherited external classes
-# will be listed.
-
-ALLEXTERNALS = NO
-
-# If the EXTERNAL_GROUPS tag is set to YES all external groups will be listed
-# in the modules index. If set to NO, only the current project's groups will
-# be listed.
-
-EXTERNAL_GROUPS = YES
-
-# The PERL_PATH should be the absolute path and name of the perl script
-# interpreter (i.e. the result of `which perl').
-
-PERL_PATH = /usr/bin/perl
-
-#---------------------------------------------------------------------------
-# Configuration options related to the dot tool
-#---------------------------------------------------------------------------
-
-# If the CLASS_DIAGRAMS tag is set to YES (the default) Doxygen will
-# generate an inheritance diagram (in HTML, RTF and LaTeX) for classes with base
-# or super classes. Setting the tag to NO turns the diagrams off. Note that
-# this option is superseded by the HAVE_DOT option below. This is only a
-# fallback. It is recommended to install and use dot, since it yields more
-# powerful graphs.
-
-CLASS_DIAGRAMS = YES
-
-# You can define message sequence charts within doxygen comments using the \msc
-# command. Doxygen will then run the mscgen tool (see
-# http://www.mcternan.me.uk/mscgen/) to produce the chart and insert it in the
-# documentation. The MSCGEN_PATH tag allows you to specify the directory where
-# the mscgen tool resides. If left empty the tool is assumed to be found in the
-# default search path.
-
-MSCGEN_PATH =
-
-# If set to YES, the inheritance and collaboration graphs will hide
-# inheritance and usage relations if the target is undocumented
-# or is not a class.
-
-HIDE_UNDOC_RELATIONS = YES
-
-# If you set the HAVE_DOT tag to YES then doxygen will assume the dot tool is
-# available from the path. This tool is part of Graphviz, a graph visualization
-# toolkit from AT&T and Lucent Bell Labs. The other options in this section
-# have no effect if this option is set to NO (the default)
-
-HAVE_DOT = NO
-
-# By default doxygen will write a font called FreeSans.ttf to the output
-# directory and reference it in all dot files that doxygen generates. This
-# font does not include all possible unicode characters however, so when you need
-# these (or just want a differently looking font) you can specify the font name
-# using DOT_FONTNAME. You need to make sure dot is able to find the font,
-# which can be done by putting it in a standard location or by setting the
-# DOTFONTPATH environment variable or by setting DOT_FONTPATH to the directory
-# containing the font.
-
-DOT_FONTNAME = FreeSans
-
-# The DOT_FONTSIZE tag can be used to set the size of the font of dot graphs.
-# The default size is 10pt.
-
-DOT_FONTSIZE = 10
-
-# By default doxygen will tell dot to use the output directory to look for the
-# FreeSans.ttf font (which doxygen will put there itself). If you specify a
-# different font using DOT_FONTNAME you can set the path where dot
-# can find it using this tag.
-
-DOT_FONTPATH =
-
-# If the CLASS_GRAPH and HAVE_DOT tags are set to YES then doxygen
-# will generate a graph for each documented class showing the direct and
-# indirect inheritance relations. Setting this tag to YES will force
-# the CLASS_DIAGRAMS tag to NO.
-
-CLASS_GRAPH = YES
-
-# If the COLLABORATION_GRAPH and HAVE_DOT tags are set to YES then doxygen
-# will generate a graph for each documented class showing the direct and
-# indirect implementation dependencies (inheritance, containment, and
-# class references variables) of the class with other documented classes.
-
-COLLABORATION_GRAPH = YES
-
-# If the GROUP_GRAPHS and HAVE_DOT tags are set to YES then doxygen
-# will generate a graph for groups, showing the direct groups dependencies
-
-GROUP_GRAPHS = YES
-
-# If the UML_LOOK tag is set to YES doxygen will generate inheritance and
-# collaboration diagrams in a style similar to the OMG's Unified Modeling
-# Language.
-
-UML_LOOK = NO
-
-# If set to YES, the inheritance and collaboration graphs will show the
-# relations between templates and their instances.
-
-TEMPLATE_RELATIONS = NO
-
-# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDE_GRAPH, and HAVE_DOT
-# tags are set to YES then doxygen will generate a graph for each documented
-# file showing the direct and indirect include dependencies of the file with
-# other documented files.
-
-INCLUDE_GRAPH = YES
-
-# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDED_BY_GRAPH, and
-# HAVE_DOT tags are set to YES then doxygen will generate a graph for each
-# documented header file showing the documented files that directly or
-# indirectly include this file.
-
-INCLUDED_BY_GRAPH = YES
-
-# If the CALL_GRAPH and HAVE_DOT options are set to YES then
-# doxygen will generate a call dependency graph for every global function
-# or class method. Note that enabling this option will significantly increase
-# the time of a run. So in most cases it will be better to enable call graphs
-# for selected functions only using the \callgraph command.
-
-CALL_GRAPH = NO
-
-# If the CALLER_GRAPH and HAVE_DOT tags are set to YES then
-# doxygen will generate a caller dependency graph for every global function
-# or class method. Note that enabling this option will significantly increase
-# the time of a run. So in most cases it will be better to enable caller
-# graphs for selected functions only using the \callergraph command.
-
-CALLER_GRAPH = NO
-
-# If the GRAPHICAL_HIERARCHY and HAVE_DOT tags are set to YES then doxygen
-# will show a graphical hierarchy of all classes instead of a textual one.
-
-GRAPHICAL_HIERARCHY = YES
-
-# If the DIRECTORY_GRAPH, SHOW_DIRECTORIES and HAVE_DOT tags are set to YES
-# then doxygen will show the dependencies a directory has on other directories
-# in a graphical way. The dependency relations are determined by the #include
-# relations between the files in the directories.
-
-DIRECTORY_GRAPH = YES
-
-# The DOT_IMAGE_FORMAT tag can be used to set the image format of the images
-# generated by dot. Possible values are png, jpg, or gif
-# If left blank png will be used.
-
-DOT_IMAGE_FORMAT = png
-
-# The tag DOT_PATH can be used to specify the path where the dot tool can be
-# found. If left blank, it is assumed the dot tool can be found in the path.
-
-DOT_PATH =
-
-# The DOTFILE_DIRS tag can be used to specify one or more directories that
-# contain dot files that are included in the documentation (see the
-# \dotfile command).
-
-DOTFILE_DIRS =
-
-# The DOT_GRAPH_MAX_NODES tag can be used to set the maximum number of
-# nodes that will be shown in the graph. If the number of nodes in a graph
-# becomes larger than this value, doxygen will truncate the graph, which is
-# visualized by representing a node as a red box. Note that if the
-# number of direct children of the root node in a graph is already larger than
-# DOT_GRAPH_MAX_NODES then the graph will not be shown at all. Also note
-# that the size of a graph can be further restricted by MAX_DOT_GRAPH_DEPTH.
-
-DOT_GRAPH_MAX_NODES = 50
-
-# The MAX_DOT_GRAPH_DEPTH tag can be used to set the maximum depth of the
-# graphs generated by dot. A depth value of 3 means that only nodes reachable
-# from the root by following a path via at most 3 edges will be shown. Nodes
-# that lay further from the root node will be omitted. Note that setting this
-# option to 1 or 2 may greatly reduce the computation time needed for large
-# code bases. Also note that the size of a graph can be further restricted by
-# DOT_GRAPH_MAX_NODES. Using a depth of 0 means no depth restriction.
-
-MAX_DOT_GRAPH_DEPTH = 0
-
-# Set the DOT_TRANSPARENT tag to YES to generate images with a transparent
-# background. This is disabled by default, because dot on Windows does not
-# seem to support this out of the box. Warning: Depending on the platform used,
-# enabling this option may lead to badly anti-aliased labels on the edges of
-# a graph (i.e. they become hard to read).
-
-DOT_TRANSPARENT = NO
-
-# Set the DOT_MULTI_TARGETS tag to YES to allow dot to generate multiple output
-# files in one run (i.e. multiple -o and -T options on the command line). This
-# makes dot run faster, but since only newer versions of dot (>1.8.10)
-# support this, this feature is disabled by default.
-
-DOT_MULTI_TARGETS = YES
-
-# If the GENERATE_LEGEND tag is set to YES (the default) Doxygen will
-# generate a legend page explaining the meaning of the various boxes and
-# arrows in the dot generated graphs.
-
-GENERATE_LEGEND = YES
-
-# If the DOT_CLEANUP tag is set to YES (the default) Doxygen will
-# remove the intermediate dot files that are used to generate
-# the various graphs.
-
-DOT_CLEANUP = YES
diff --git a/gr-aistx/docs/doxygen/doxyxml/__init__.py b/gr-aistx/docs/doxygen/doxyxml/__init__.py
index 5cd0b3c..527106c 100644
--- a/gr-aistx/docs/doxygen/doxyxml/__init__.py
+++ b/gr-aistx/docs/doxygen/doxyxml/__init__.py
@@ -1,22 +1,11 @@
#
# Copyright 2010 Free Software Foundation, Inc.
#
-# This file is part of GNU Radio
+# This file was generated by gr_modtool, a tool from the GNU Radio framework
+# This file is a part of gr-aistx
#
-# GNU Radio is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 3, or (at your option)
-# any later version.
+# SPDX-License-Identifier: GPL-3.0-or-later
#
-# GNU Radio is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with GNU Radio; see the file COPYING. If not, write to
-# the Free Software Foundation, Inc., 51 Franklin Street,
-# Boston, MA 02110-1301, USA.
#
"""
Python interface to contents of doxygen xml documentation.
@@ -64,7 +53,8 @@
"""
-from doxyindex import DoxyIndex, DoxyFunction, DoxyParam, DoxyClass, DoxyFile, DoxyNamespace, DoxyGroup, DoxyFriend, DoxyOther
+from .doxyindex import DoxyIndex, DoxyFunction, DoxyParam, DoxyClass, DoxyFile, DoxyNamespace, DoxyGroup, DoxyFriend, DoxyOther
+
def _test():
import os
@@ -77,6 +67,6 @@ def _test():
import doctest
return doctest.testmod()
+
if __name__ == "__main__":
_test()
-
diff --git a/gr-aistx/docs/doxygen/doxyxml/__pycache__/__init__.cpython-310.pyc b/gr-aistx/docs/doxygen/doxyxml/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000..b60a1e3
Binary files /dev/null and b/gr-aistx/docs/doxygen/doxyxml/__pycache__/__init__.cpython-310.pyc differ
diff --git a/gr-aistx/docs/doxygen/doxyxml/__pycache__/base.cpython-310.pyc b/gr-aistx/docs/doxygen/doxyxml/__pycache__/base.cpython-310.pyc
new file mode 100644
index 0000000..dca54da
Binary files /dev/null and b/gr-aistx/docs/doxygen/doxyxml/__pycache__/base.cpython-310.pyc differ
diff --git a/gr-aistx/docs/doxygen/doxyxml/__pycache__/doxyindex.cpython-310.pyc b/gr-aistx/docs/doxygen/doxyxml/__pycache__/doxyindex.cpython-310.pyc
new file mode 100644
index 0000000..81ed6a3
Binary files /dev/null and b/gr-aistx/docs/doxygen/doxyxml/__pycache__/doxyindex.cpython-310.pyc differ
diff --git a/gr-aistx/docs/doxygen/doxyxml/__pycache__/text.cpython-310.pyc b/gr-aistx/docs/doxygen/doxyxml/__pycache__/text.cpython-310.pyc
new file mode 100644
index 0000000..b89f199
Binary files /dev/null and b/gr-aistx/docs/doxygen/doxyxml/__pycache__/text.cpython-310.pyc differ
diff --git a/gr-aistx/docs/doxygen/doxyxml/base.py b/gr-aistx/docs/doxygen/doxyxml/base.py
index e8f026a..da39d27 100644
--- a/gr-aistx/docs/doxygen/doxyxml/base.py
+++ b/gr-aistx/docs/doxygen/doxyxml/base.py
@@ -1,22 +1,11 @@
#
# Copyright 2010 Free Software Foundation, Inc.
#
-# This file is part of GNU Radio
+# This file was generated by gr_modtool, a tool from the GNU Radio framework
+# This file is a part of gr-aistx
#
-# GNU Radio is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 3, or (at your option)
-# any later version.
+# SPDX-License-Identifier: GPL-3.0-or-later
#
-# GNU Radio is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with GNU Radio; see the file COPYING. If not, write to
-# the Free Software Foundation, Inc., 51 Franklin Street,
-# Boston, MA 02110-1301, USA.
#
"""
A base class is created.
@@ -30,18 +19,18 @@
from xml.parsers.expat import ExpatError
-from generated import compound
+from .generated import compound
class Base(object):
- class Duplicate(StandardError):
+ class Duplicate(Exception):
pass
- class NoSuchMember(StandardError):
+ class NoSuchMember(Exception):
pass
- class ParsingError(StandardError):
+ class ParsingError(Exception):
pass
def __init__(self, parse_data, top=None):
@@ -94,19 +83,19 @@ def get_cls(self, mem):
for cls in self.mem_classes:
if cls.can_parse(mem):
return cls
- raise StandardError(("Did not find a class for object '%s'." \
- % (mem.get_name())))
+ raise Exception(("Did not find a class for object '%s'."
+ % (mem.get_name())))
def convert_mem(self, mem):
try:
cls = self.get_cls(mem)
converted = cls.from_parse_data(mem, self.top)
if converted is None:
- raise StandardError('No class matched this object.')
+ raise Exception('No class matched this object.')
self.add_ref(converted)
return converted
- except StandardError, e:
- print e
+ except Exception as e:
+ print(e)
@classmethod
def includes(cls, inst):
diff --git a/gr-aistx/docs/doxygen/doxyxml/doxyindex.py b/gr-aistx/docs/doxygen/doxyxml/doxyindex.py
index 0132ab8..8be3e83 100644
--- a/gr-aistx/docs/doxygen/doxyxml/doxyindex.py
+++ b/gr-aistx/docs/doxygen/doxyxml/doxyindex.py
@@ -1,22 +1,11 @@
#
# Copyright 2010 Free Software Foundation, Inc.
#
-# This file is part of GNU Radio
+# This file was generated by gr_modtool, a tool from the GNU Radio framework
+# This file is a part of gr-aistx
#
-# GNU Radio is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 3, or (at your option)
-# any later version.
+# SPDX-License-Identifier: GPL-3.0-or-later
#
-# GNU Radio is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with GNU Radio; see the file COPYING. If not, write to
-# the Free Software Foundation, Inc., 51 Franklin Street,
-# Boston, MA 02110-1301, USA.
#
"""
Classes providing more user-friendly interfaces to the doxygen xml
@@ -25,9 +14,10 @@
import os
-from generated import index
-from base import Base
-from text import description
+from .generated import index
+from .base import Base
+from .text import description
+
class DoxyIndex(Base):
"""
@@ -43,28 +33,22 @@ def _parse(self):
self._root = index.parse(os.path.join(self._xml_path, 'index.xml'))
for mem in self._root.compound:
converted = self.convert_mem(mem)
- # For files we want the contents to be accessible directly
- # from the parent rather than having to go through the file
- # object.
+ # For files and namespaces we want the contents to be
+ # accessible directly from the parent rather than having
+ # to go through the file object.
if self.get_cls(mem) == DoxyFile:
if mem.name.endswith('.h'):
self._members += converted.members()
self._members.append(converted)
+ elif self.get_cls(mem) == DoxyNamespace:
+ self._members += converted.members()
+ self._members.append(converted)
else:
self._members.append(converted)
-def generate_swig_doc_i(self):
- """
- %feature("docstring") gr_make_align_on_samplenumbers_ss::align_state "
- Wraps the C++: gr_align_on_samplenumbers_ss::align_state";
- """
- pass
-
-
class DoxyCompMem(Base):
-
kind = None
def __init__(self, *args, **kwargs):
@@ -80,9 +64,27 @@ def set_descriptions(self, parse_data):
self._data['brief_description'] = bd
self._data['detailed_description'] = dd
+ def set_parameters(self, data):
+ vs = [ddc.value for ddc in data.detaileddescription.content_]
+ pls = []
+ for v in vs:
+ if hasattr(v, 'parameterlist'):
+ pls += v.parameterlist
+ pis = []
+ for pl in pls:
+ pis += pl.parameteritem
+ dpis = []
+ for pi in pis:
+ dpi = DoxyParameterItem(pi)
+ dpi._parse()
+ dpis.append(dpi)
+ self._data['params'] = dpis
+
+
class DoxyCompound(DoxyCompMem):
pass
+
class DoxyMember(DoxyCompMem):
pass
@@ -98,15 +100,20 @@ def _parse(self):
return
super(DoxyFunction, self)._parse()
self.set_descriptions(self._parse_data)
- self._data['params'] = []
- prms = self._parse_data.param
- for prm in prms:
- self._data['params'].append(DoxyParam(prm))
+ self.set_parameters(self._parse_data)
+ if not self._data['params']:
+ # If the params weren't set by a comment then just grab the names.
+ self._data['params'] = []
+ prms = self._parse_data.param
+ for prm in prms:
+ self._data['params'].append(DoxyParam(prm))
brief_description = property(lambda self: self.data()['brief_description'])
- detailed_description = property(lambda self: self.data()['detailed_description'])
+ detailed_description = property(
+ lambda self: self.data()['detailed_description'])
params = property(lambda self: self.data()['params'])
+
Base.mem_classes.append(DoxyFunction)
@@ -121,9 +128,41 @@ def _parse(self):
self.set_descriptions(self._parse_data)
self._data['declname'] = self._parse_data.declname
+ @property
+ def description(self):
+ descriptions = []
+ if self.brief_description:
+ descriptions.append(self.brief_description)
+ if self.detailed_description:
+ descriptions.append(self.detailed_description)
+ return '\n\n'.join(descriptions)
+
brief_description = property(lambda self: self.data()['brief_description'])
- detailed_description = property(lambda self: self.data()['detailed_description'])
- declname = property(lambda self: self.data()['declname'])
+ detailed_description = property(
+ lambda self: self.data()['detailed_description'])
+ name = property(lambda self: self.data()['declname'])
+
+
+class DoxyParameterItem(DoxyMember):
+ """A different representation of a parameter in Doxygen."""
+
+ def _parse(self):
+ if self._parsed:
+ return
+ super(DoxyParameterItem, self)._parse()
+ names = []
+ for nl in self._parse_data.parameternamelist:
+ for pn in nl.parametername:
+ names.append(description(pn))
+ # Just take first name
+ self._data['name'] = names[0]
+ # Get description
+ pd = description(self._parse_data.get_parameterdescription())
+ self._data['description'] = pd
+
+ description = property(lambda self: self.data()['description'])
+ name = property(lambda self: self.data()['name'])
+
class DoxyClass(DoxyCompound):
@@ -139,12 +178,16 @@ def _parse(self):
if self._error:
return
self.set_descriptions(self._retrieved_data.compounddef)
+ self.set_parameters(self._retrieved_data.compounddef)
# Sectiondef.kind tells about whether private or public.
# We just ignore this for now.
self.process_memberdefs()
brief_description = property(lambda self: self.data()['brief_description'])
- detailed_description = property(lambda self: self.data()['detailed_description'])
+ detailed_description = property(
+ lambda self: self.data()['detailed_description'])
+ params = property(lambda self: self.data()['params'])
+
Base.mem_classes.append(DoxyClass)
@@ -166,7 +209,9 @@ def _parse(self):
self.process_memberdefs()
brief_description = property(lambda self: self.data()['brief_description'])
- detailed_description = property(lambda self: self.data()['detailed_description'])
+ detailed_description = property(
+ lambda self: self.data()['detailed_description'])
+
Base.mem_classes.append(DoxyFile)
@@ -177,6 +222,17 @@ class DoxyNamespace(DoxyCompound):
kind = 'namespace'
+ def _parse(self):
+ if self._parsed:
+ return
+ super(DoxyNamespace, self)._parse()
+ self.retrieve_data()
+ self.set_descriptions(self._retrieved_data.compounddef)
+ if self._error:
+ return
+ self.process_memberdefs()
+
+
Base.mem_classes.append(DoxyNamespace)
@@ -220,6 +276,7 @@ class DoxyFriend(DoxyMember):
kind = 'friend'
+
Base.mem_classes.append(DoxyFriend)
@@ -227,11 +284,12 @@ class DoxyOther(Base):
__module__ = "gnuradio.utils.doxyxml"
- kinds = set(['variable', 'struct', 'union', 'define', 'typedef', 'enum', 'dir', 'page'])
+ kinds = set(['variable', 'struct', 'union', 'define', 'typedef', 'enum',
+ 'dir', 'page', 'signal', 'slot', 'property'])
@classmethod
def can_parse(cls, obj):
return obj.kind in cls.kinds
-Base.mem_classes.append(DoxyOther)
+Base.mem_classes.append(DoxyOther)
diff --git a/gr-aistx/docs/doxygen/doxyxml/generated/__pycache__/__init__.cpython-310.pyc b/gr-aistx/docs/doxygen/doxyxml/generated/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000..1dbd3ea
Binary files /dev/null and b/gr-aistx/docs/doxygen/doxyxml/generated/__pycache__/__init__.cpython-310.pyc differ
diff --git a/gr-aistx/docs/doxygen/doxyxml/generated/__pycache__/compound.cpython-310.pyc b/gr-aistx/docs/doxygen/doxyxml/generated/__pycache__/compound.cpython-310.pyc
new file mode 100644
index 0000000..52b75c2
Binary files /dev/null and b/gr-aistx/docs/doxygen/doxyxml/generated/__pycache__/compound.cpython-310.pyc differ
diff --git a/gr-aistx/docs/doxygen/doxyxml/generated/__pycache__/compoundsuper.cpython-310.pyc b/gr-aistx/docs/doxygen/doxyxml/generated/__pycache__/compoundsuper.cpython-310.pyc
new file mode 100644
index 0000000..dd5086f
Binary files /dev/null and b/gr-aistx/docs/doxygen/doxyxml/generated/__pycache__/compoundsuper.cpython-310.pyc differ
diff --git a/gr-aistx/docs/doxygen/doxyxml/generated/__pycache__/index.cpython-310.pyc b/gr-aistx/docs/doxygen/doxyxml/generated/__pycache__/index.cpython-310.pyc
new file mode 100644
index 0000000..c08b158
Binary files /dev/null and b/gr-aistx/docs/doxygen/doxyxml/generated/__pycache__/index.cpython-310.pyc differ
diff --git a/gr-aistx/docs/doxygen/doxyxml/generated/__pycache__/indexsuper.cpython-310.pyc b/gr-aistx/docs/doxygen/doxyxml/generated/__pycache__/indexsuper.cpython-310.pyc
new file mode 100644
index 0000000..66287af
Binary files /dev/null and b/gr-aistx/docs/doxygen/doxyxml/generated/__pycache__/indexsuper.cpython-310.pyc differ
diff --git a/gr-aistx/docs/doxygen/doxyxml/generated/compound.py b/gr-aistx/docs/doxygen/doxyxml/generated/compound.py
index 1522ac2..321328b 100644
--- a/gr-aistx/docs/doxygen/doxyxml/generated/compound.py
+++ b/gr-aistx/docs/doxygen/doxyxml/generated/compound.py
@@ -4,14 +4,14 @@
Generated Mon Feb 9 19:08:05 2009 by generateDS.py.
"""
-from string import lower as str_lower
+
from xml.dom import minidom
from xml.dom import Node
import sys
-import compoundsuper as supermod
-from compoundsuper import MixedContainer
+from . import compoundsuper as supermod
+from .compoundsuper import MixedContainer
class DoxygenTypeSub(supermod.DoxygenType):
@@ -22,13 +22,15 @@ def find(self, details):
return self.compounddef.find(details)
+
supermod.DoxygenType.subclass = DoxygenTypeSub
# end class DoxygenTypeSub
class compounddefTypeSub(supermod.compounddefType):
def __init__(self, kind=None, prot=None, id=None, compoundname='', title='', basecompoundref=None, derivedcompoundref=None, includes=None, includedby=None, incdepgraph=None, invincdepgraph=None, innerdir=None, innerfile=None, innerclass=None, innernamespace=None, innerpage=None, innergroup=None, templateparamlist=None, sectiondef=None, briefdescription=None, detaileddescription=None, inheritancegraph=None, collaborationgraph=None, programlisting=None, location=None, listofallmembers=None):
- supermod.compounddefType.__init__(self, kind, prot, id, compoundname, title, basecompoundref, derivedcompoundref, includes, includedby, incdepgraph, invincdepgraph, innerdir, innerfile, innerclass, innernamespace, innerpage, innergroup, templateparamlist, sectiondef, briefdescription, detaileddescription, inheritancegraph, collaborationgraph, programlisting, location, listofallmembers)
+ supermod.compounddefType.__init__(self, kind, prot, id, compoundname, title, basecompoundref, derivedcompoundref, includes, includedby, incdepgraph, invincdepgraph, innerdir, innerfile, innerclass,
+ innernamespace, innerpage, innergroup, templateparamlist, sectiondef, briefdescription, detaileddescription, inheritancegraph, collaborationgraph, programlisting, location, listofallmembers)
def find(self, details):
@@ -48,13 +50,18 @@ def find(self, details):
class listofallmembersTypeSub(supermod.listofallmembersType):
def __init__(self, member=None):
supermod.listofallmembersType.__init__(self, member)
+
+
supermod.listofallmembersType.subclass = listofallmembersTypeSub
# end class listofallmembersTypeSub
class memberRefTypeSub(supermod.memberRefType):
def __init__(self, virt=None, prot=None, refid=None, ambiguityscope=None, scope='', name=''):
- supermod.memberRefType.__init__(self, virt, prot, refid, ambiguityscope, scope, name)
+ supermod.memberRefType.__init__(
+ self, virt, prot, refid, ambiguityscope, scope, name)
+
+
supermod.memberRefType.subclass = memberRefTypeSub
# end class memberRefTypeSub
@@ -62,6 +69,8 @@ def __init__(self, virt=None, prot=None, refid=None, ambiguityscope=None, scope=
class compoundRefTypeSub(supermod.compoundRefType):
def __init__(self, virt=None, prot=None, refid=None, valueOf_='', mixedclass_=None, content_=None):
supermod.compoundRefType.__init__(self, mixedclass_, content_)
+
+
supermod.compoundRefType.subclass = compoundRefTypeSub
# end class compoundRefTypeSub
@@ -69,6 +78,8 @@ def __init__(self, virt=None, prot=None, refid=None, valueOf_='', mixedclass_=No
class reimplementTypeSub(supermod.reimplementType):
def __init__(self, refid=None, valueOf_='', mixedclass_=None, content_=None):
supermod.reimplementType.__init__(self, mixedclass_, content_)
+
+
supermod.reimplementType.subclass = reimplementTypeSub
# end class reimplementTypeSub
@@ -76,6 +87,8 @@ def __init__(self, refid=None, valueOf_='', mixedclass_=None, content_=None):
class incTypeSub(supermod.incType):
def __init__(self, local=None, refid=None, valueOf_='', mixedclass_=None, content_=None):
supermod.incType.__init__(self, mixedclass_, content_)
+
+
supermod.incType.subclass = incTypeSub
# end class incTypeSub
@@ -83,23 +96,26 @@ def __init__(self, local=None, refid=None, valueOf_='', mixedclass_=None, conten
class refTypeSub(supermod.refType):
def __init__(self, prot=None, refid=None, valueOf_='', mixedclass_=None, content_=None):
supermod.refType.__init__(self, mixedclass_, content_)
+
+
supermod.refType.subclass = refTypeSub
# end class refTypeSub
-
class refTextTypeSub(supermod.refTextType):
def __init__(self, refid=None, kindref=None, external=None, valueOf_='', mixedclass_=None, content_=None):
supermod.refTextType.__init__(self, mixedclass_, content_)
+
supermod.refTextType.subclass = refTextTypeSub
# end class refTextTypeSub
-class sectiondefTypeSub(supermod.sectiondefType):
+class sectiondefTypeSub(supermod.sectiondefType):
def __init__(self, kind=None, header='', description=None, memberdef=None):
- supermod.sectiondefType.__init__(self, kind, header, description, memberdef)
+ supermod.sectiondefType.__init__(
+ self, kind, header, description, memberdef)
def find(self, details):
@@ -116,7 +132,10 @@ def find(self, details):
class memberdefTypeSub(supermod.memberdefType):
def __init__(self, initonly=None, kind=None, volatile=None, const=None, raise_=None, virt=None, readable=None, prot=None, explicit=None, new=None, final=None, writable=None, add=None, static=None, remove=None, sealed=None, mutable=None, gettable=None, inline=None, settable=None, id=None, templateparamlist=None, type_=None, definition='', argsstring='', name='', read='', write='', bitfield='', reimplements=None, reimplementedby=None, param=None, enumvalue=None, initializer=None, exceptions=None, briefdescription=None, detaileddescription=None, inbodydescription=None, location=None, references=None, referencedby=None):
- supermod.memberdefType.__init__(self, initonly, kind, volatile, const, raise_, virt, readable, prot, explicit, new, final, writable, add, static, remove, sealed, mutable, gettable, inline, settable, id, templateparamlist, type_, definition, argsstring, name, read, write, bitfield, reimplements, reimplementedby, param, enumvalue, initializer, exceptions, briefdescription, detaileddescription, inbodydescription, location, references, referencedby)
+ supermod.memberdefType.__init__(self, initonly, kind, volatile, const, raise_, virt, readable, prot, explicit, new, final, writable, add, static, remove, sealed, mutable, gettable, inline, settable, id, templateparamlist, type_,
+ definition, argsstring, name, read, write, bitfield, reimplements, reimplementedby, param, enumvalue, initializer, exceptions, briefdescription, detaileddescription, inbodydescription, location, references, referencedby)
+
+
supermod.memberdefType.subclass = memberdefTypeSub
# end class memberdefTypeSub
@@ -124,6 +143,8 @@ def __init__(self, initonly=None, kind=None, volatile=None, const=None, raise_=N
class descriptionTypeSub(supermod.descriptionType):
def __init__(self, title='', para=None, sect1=None, internal=None, mixedclass_=None, content_=None):
supermod.descriptionType.__init__(self, mixedclass_, content_)
+
+
supermod.descriptionType.subclass = descriptionTypeSub
# end class descriptionTypeSub
@@ -131,6 +152,8 @@ def __init__(self, title='', para=None, sect1=None, internal=None, mixedclass_=N
class enumvalueTypeSub(supermod.enumvalueType):
def __init__(self, prot=None, id=None, name='', initializer=None, briefdescription=None, detaileddescription=None, mixedclass_=None, content_=None):
supermod.enumvalueType.__init__(self, mixedclass_, content_)
+
+
supermod.enumvalueType.subclass = enumvalueTypeSub
# end class enumvalueTypeSub
@@ -138,13 +161,18 @@ def __init__(self, prot=None, id=None, name='', initializer=None, briefdescripti
class templateparamlistTypeSub(supermod.templateparamlistType):
def __init__(self, param=None):
supermod.templateparamlistType.__init__(self, param)
+
+
supermod.templateparamlistType.subclass = templateparamlistTypeSub
# end class templateparamlistTypeSub
class paramTypeSub(supermod.paramType):
def __init__(self, type_=None, declname='', defname='', array='', defval=None, briefdescription=None):
- supermod.paramType.__init__(self, type_, declname, defname, array, defval, briefdescription)
+ supermod.paramType.__init__(
+ self, type_, declname, defname, array, defval, briefdescription)
+
+
supermod.paramType.subclass = paramTypeSub
# end class paramTypeSub
@@ -152,6 +180,8 @@ def __init__(self, type_=None, declname='', defname='', array='', defval=None, b
class linkedTextTypeSub(supermod.linkedTextType):
def __init__(self, ref=None, mixedclass_=None, content_=None):
supermod.linkedTextType.__init__(self, mixedclass_, content_)
+
+
supermod.linkedTextType.subclass = linkedTextTypeSub
# end class linkedTextTypeSub
@@ -159,6 +189,8 @@ def __init__(self, ref=None, mixedclass_=None, content_=None):
class graphTypeSub(supermod.graphType):
def __init__(self, node=None):
supermod.graphType.__init__(self, node)
+
+
supermod.graphType.subclass = graphTypeSub
# end class graphTypeSub
@@ -166,6 +198,8 @@ def __init__(self, node=None):
class nodeTypeSub(supermod.nodeType):
def __init__(self, id=None, label='', link=None, childnode=None):
supermod.nodeType.__init__(self, id, label, link, childnode)
+
+
supermod.nodeType.subclass = nodeTypeSub
# end class nodeTypeSub
@@ -173,6 +207,8 @@ def __init__(self, id=None, label='', link=None, childnode=None):
class childnodeTypeSub(supermod.childnodeType):
def __init__(self, relation=None, refid=None, edgelabel=None):
supermod.childnodeType.__init__(self, relation, refid, edgelabel)
+
+
supermod.childnodeType.subclass = childnodeTypeSub
# end class childnodeTypeSub
@@ -180,6 +216,8 @@ def __init__(self, relation=None, refid=None, edgelabel=None):
class linkTypeSub(supermod.linkType):
def __init__(self, refid=None, external=None, valueOf_=''):
supermod.linkType.__init__(self, refid, external)
+
+
supermod.linkType.subclass = linkTypeSub
# end class linkTypeSub
@@ -187,13 +225,18 @@ def __init__(self, refid=None, external=None, valueOf_=''):
class listingTypeSub(supermod.listingType):
def __init__(self, codeline=None):
supermod.listingType.__init__(self, codeline)
+
+
supermod.listingType.subclass = listingTypeSub
# end class listingTypeSub
class codelineTypeSub(supermod.codelineType):
def __init__(self, external=None, lineno=None, refkind=None, refid=None, highlight=None):
- supermod.codelineType.__init__(self, external, lineno, refkind, refid, highlight)
+ supermod.codelineType.__init__(
+ self, external, lineno, refkind, refid, highlight)
+
+
supermod.codelineType.subclass = codelineTypeSub
# end class codelineTypeSub
@@ -201,6 +244,8 @@ def __init__(self, external=None, lineno=None, refkind=None, refid=None, highlig
class highlightTypeSub(supermod.highlightType):
def __init__(self, class_=None, sp=None, ref=None, mixedclass_=None, content_=None):
supermod.highlightType.__init__(self, mixedclass_, content_)
+
+
supermod.highlightType.subclass = highlightTypeSub
# end class highlightTypeSub
@@ -208,13 +253,18 @@ def __init__(self, class_=None, sp=None, ref=None, mixedclass_=None, content_=No
class referenceTypeSub(supermod.referenceType):
def __init__(self, endline=None, startline=None, refid=None, compoundref=None, valueOf_='', mixedclass_=None, content_=None):
supermod.referenceType.__init__(self, mixedclass_, content_)
+
+
supermod.referenceType.subclass = referenceTypeSub
# end class referenceTypeSub
class locationTypeSub(supermod.locationType):
def __init__(self, bodystart=None, line=None, bodyend=None, bodyfile=None, file=None, valueOf_=''):
- supermod.locationType.__init__(self, bodystart, line, bodyend, bodyfile, file)
+ supermod.locationType.__init__(
+ self, bodystart, line, bodyend, bodyfile, file)
+
+
supermod.locationType.subclass = locationTypeSub
# end class locationTypeSub
@@ -222,6 +272,8 @@ def __init__(self, bodystart=None, line=None, bodyend=None, bodyfile=None, file=
class docSect1TypeSub(supermod.docSect1Type):
def __init__(self, id=None, title='', para=None, sect2=None, internal=None, mixedclass_=None, content_=None):
supermod.docSect1Type.__init__(self, mixedclass_, content_)
+
+
supermod.docSect1Type.subclass = docSect1TypeSub
# end class docSect1TypeSub
@@ -229,6 +281,8 @@ def __init__(self, id=None, title='', para=None, sect2=None, internal=None, mixe
class docSect2TypeSub(supermod.docSect2Type):
def __init__(self, id=None, title='', para=None, sect3=None, internal=None, mixedclass_=None, content_=None):
supermod.docSect2Type.__init__(self, mixedclass_, content_)
+
+
supermod.docSect2Type.subclass = docSect2TypeSub
# end class docSect2TypeSub
@@ -236,6 +290,8 @@ def __init__(self, id=None, title='', para=None, sect3=None, internal=None, mixe
class docSect3TypeSub(supermod.docSect3Type):
def __init__(self, id=None, title='', para=None, sect4=None, internal=None, mixedclass_=None, content_=None):
supermod.docSect3Type.__init__(self, mixedclass_, content_)
+
+
supermod.docSect3Type.subclass = docSect3TypeSub
# end class docSect3TypeSub
@@ -243,6 +299,8 @@ def __init__(self, id=None, title='', para=None, sect4=None, internal=None, mixe
class docSect4TypeSub(supermod.docSect4Type):
def __init__(self, id=None, title='', para=None, internal=None, mixedclass_=None, content_=None):
supermod.docSect4Type.__init__(self, mixedclass_, content_)
+
+
supermod.docSect4Type.subclass = docSect4TypeSub
# end class docSect4TypeSub
@@ -250,6 +308,8 @@ def __init__(self, id=None, title='', para=None, internal=None, mixedclass_=None
class docInternalTypeSub(supermod.docInternalType):
def __init__(self, para=None, sect1=None, mixedclass_=None, content_=None):
supermod.docInternalType.__init__(self, mixedclass_, content_)
+
+
supermod.docInternalType.subclass = docInternalTypeSub
# end class docInternalTypeSub
@@ -257,6 +317,8 @@ def __init__(self, para=None, sect1=None, mixedclass_=None, content_=None):
class docInternalS1TypeSub(supermod.docInternalS1Type):
def __init__(self, para=None, sect2=None, mixedclass_=None, content_=None):
supermod.docInternalS1Type.__init__(self, mixedclass_, content_)
+
+
supermod.docInternalS1Type.subclass = docInternalS1TypeSub
# end class docInternalS1TypeSub
@@ -264,6 +326,8 @@ def __init__(self, para=None, sect2=None, mixedclass_=None, content_=None):
class docInternalS2TypeSub(supermod.docInternalS2Type):
def __init__(self, para=None, sect3=None, mixedclass_=None, content_=None):
supermod.docInternalS2Type.__init__(self, mixedclass_, content_)
+
+
supermod.docInternalS2Type.subclass = docInternalS2TypeSub
# end class docInternalS2TypeSub
@@ -271,6 +335,8 @@ def __init__(self, para=None, sect3=None, mixedclass_=None, content_=None):
class docInternalS3TypeSub(supermod.docInternalS3Type):
def __init__(self, para=None, sect3=None, mixedclass_=None, content_=None):
supermod.docInternalS3Type.__init__(self, mixedclass_, content_)
+
+
supermod.docInternalS3Type.subclass = docInternalS3TypeSub
# end class docInternalS3TypeSub
@@ -278,6 +344,8 @@ def __init__(self, para=None, sect3=None, mixedclass_=None, content_=None):
class docInternalS4TypeSub(supermod.docInternalS4Type):
def __init__(self, para=None, mixedclass_=None, content_=None):
supermod.docInternalS4Type.__init__(self, mixedclass_, content_)
+
+
supermod.docInternalS4Type.subclass = docInternalS4TypeSub
# end class docInternalS4TypeSub
@@ -285,6 +353,8 @@ def __init__(self, para=None, mixedclass_=None, content_=None):
class docURLLinkSub(supermod.docURLLink):
def __init__(self, url=None, valueOf_='', mixedclass_=None, content_=None):
supermod.docURLLink.__init__(self, mixedclass_, content_)
+
+
supermod.docURLLink.subclass = docURLLinkSub
# end class docURLLinkSub
@@ -292,6 +362,8 @@ def __init__(self, url=None, valueOf_='', mixedclass_=None, content_=None):
class docAnchorTypeSub(supermod.docAnchorType):
def __init__(self, id=None, valueOf_='', mixedclass_=None, content_=None):
supermod.docAnchorType.__init__(self, mixedclass_, content_)
+
+
supermod.docAnchorType.subclass = docAnchorTypeSub
# end class docAnchorTypeSub
@@ -299,6 +371,8 @@ def __init__(self, id=None, valueOf_='', mixedclass_=None, content_=None):
class docFormulaTypeSub(supermod.docFormulaType):
def __init__(self, id=None, valueOf_='', mixedclass_=None, content_=None):
supermod.docFormulaType.__init__(self, mixedclass_, content_)
+
+
supermod.docFormulaType.subclass = docFormulaTypeSub
# end class docFormulaTypeSub
@@ -306,6 +380,8 @@ def __init__(self, id=None, valueOf_='', mixedclass_=None, content_=None):
class docIndexEntryTypeSub(supermod.docIndexEntryType):
def __init__(self, primaryie='', secondaryie=''):
supermod.docIndexEntryType.__init__(self, primaryie, secondaryie)
+
+
supermod.docIndexEntryType.subclass = docIndexEntryTypeSub
# end class docIndexEntryTypeSub
@@ -313,6 +389,8 @@ def __init__(self, primaryie='', secondaryie=''):
class docListTypeSub(supermod.docListType):
def __init__(self, listitem=None):
supermod.docListType.__init__(self, listitem)
+
+
supermod.docListType.subclass = docListTypeSub
# end class docListTypeSub
@@ -320,6 +398,8 @@ def __init__(self, listitem=None):
class docListItemTypeSub(supermod.docListItemType):
def __init__(self, para=None):
supermod.docListItemType.__init__(self, para)
+
+
supermod.docListItemType.subclass = docListItemTypeSub
# end class docListItemTypeSub
@@ -327,6 +407,8 @@ def __init__(self, para=None):
class docSimpleSectTypeSub(supermod.docSimpleSectType):
def __init__(self, kind=None, title=None, para=None):
supermod.docSimpleSectType.__init__(self, kind, title, para)
+
+
supermod.docSimpleSectType.subclass = docSimpleSectTypeSub
# end class docSimpleSectTypeSub
@@ -334,6 +416,8 @@ def __init__(self, kind=None, title=None, para=None):
class docVarListEntryTypeSub(supermod.docVarListEntryType):
def __init__(self, term=None):
supermod.docVarListEntryType.__init__(self, term)
+
+
supermod.docVarListEntryType.subclass = docVarListEntryTypeSub
# end class docVarListEntryTypeSub
@@ -341,6 +425,8 @@ def __init__(self, term=None):
class docRefTextTypeSub(supermod.docRefTextType):
def __init__(self, refid=None, kindref=None, external=None, valueOf_='', mixedclass_=None, content_=None):
supermod.docRefTextType.__init__(self, mixedclass_, content_)
+
+
supermod.docRefTextType.subclass = docRefTextTypeSub
# end class docRefTextTypeSub
@@ -348,6 +434,8 @@ def __init__(self, refid=None, kindref=None, external=None, valueOf_='', mixedcl
class docTableTypeSub(supermod.docTableType):
def __init__(self, rows=None, cols=None, row=None, caption=None):
supermod.docTableType.__init__(self, rows, cols, row, caption)
+
+
supermod.docTableType.subclass = docTableTypeSub
# end class docTableTypeSub
@@ -355,6 +443,8 @@ def __init__(self, rows=None, cols=None, row=None, caption=None):
class docRowTypeSub(supermod.docRowType):
def __init__(self, entry=None):
supermod.docRowType.__init__(self, entry)
+
+
supermod.docRowType.subclass = docRowTypeSub
# end class docRowTypeSub
@@ -362,6 +452,8 @@ def __init__(self, entry=None):
class docEntryTypeSub(supermod.docEntryType):
def __init__(self, thead=None, para=None):
supermod.docEntryType.__init__(self, thead, para)
+
+
supermod.docEntryType.subclass = docEntryTypeSub
# end class docEntryTypeSub
@@ -369,6 +461,8 @@ def __init__(self, thead=None, para=None):
class docHeadingTypeSub(supermod.docHeadingType):
def __init__(self, level=None, valueOf_='', mixedclass_=None, content_=None):
supermod.docHeadingType.__init__(self, mixedclass_, content_)
+
+
supermod.docHeadingType.subclass = docHeadingTypeSub
# end class docHeadingTypeSub
@@ -376,6 +470,8 @@ def __init__(self, level=None, valueOf_='', mixedclass_=None, content_=None):
class docImageTypeSub(supermod.docImageType):
def __init__(self, width=None, type_=None, name=None, height=None, valueOf_='', mixedclass_=None, content_=None):
supermod.docImageType.__init__(self, mixedclass_, content_)
+
+
supermod.docImageType.subclass = docImageTypeSub
# end class docImageTypeSub
@@ -383,6 +479,8 @@ def __init__(self, width=None, type_=None, name=None, height=None, valueOf_='',
class docDotFileTypeSub(supermod.docDotFileType):
def __init__(self, name=None, valueOf_='', mixedclass_=None, content_=None):
supermod.docDotFileType.__init__(self, mixedclass_, content_)
+
+
supermod.docDotFileType.subclass = docDotFileTypeSub
# end class docDotFileTypeSub
@@ -390,6 +488,8 @@ def __init__(self, name=None, valueOf_='', mixedclass_=None, content_=None):
class docTocItemTypeSub(supermod.docTocItemType):
def __init__(self, id=None, valueOf_='', mixedclass_=None, content_=None):
supermod.docTocItemType.__init__(self, mixedclass_, content_)
+
+
supermod.docTocItemType.subclass = docTocItemTypeSub
# end class docTocItemTypeSub
@@ -397,6 +497,8 @@ def __init__(self, id=None, valueOf_='', mixedclass_=None, content_=None):
class docTocListTypeSub(supermod.docTocListType):
def __init__(self, tocitem=None):
supermod.docTocListType.__init__(self, tocitem)
+
+
supermod.docTocListType.subclass = docTocListTypeSub
# end class docTocListTypeSub
@@ -404,6 +506,8 @@ def __init__(self, tocitem=None):
class docLanguageTypeSub(supermod.docLanguageType):
def __init__(self, langid=None, para=None):
supermod.docLanguageType.__init__(self, langid, para)
+
+
supermod.docLanguageType.subclass = docLanguageTypeSub
# end class docLanguageTypeSub
@@ -411,13 +515,18 @@ def __init__(self, langid=None, para=None):
class docParamListTypeSub(supermod.docParamListType):
def __init__(self, kind=None, parameteritem=None):
supermod.docParamListType.__init__(self, kind, parameteritem)
+
+
supermod.docParamListType.subclass = docParamListTypeSub
# end class docParamListTypeSub
class docParamListItemSub(supermod.docParamListItem):
def __init__(self, parameternamelist=None, parameterdescription=None):
- supermod.docParamListItem.__init__(self, parameternamelist, parameterdescription)
+ supermod.docParamListItem.__init__(
+ self, parameternamelist, parameterdescription)
+
+
supermod.docParamListItem.subclass = docParamListItemSub
# end class docParamListItemSub
@@ -425,6 +534,8 @@ def __init__(self, parameternamelist=None, parameterdescription=None):
class docParamNameListSub(supermod.docParamNameList):
def __init__(self, parametername=None):
supermod.docParamNameList.__init__(self, parametername)
+
+
supermod.docParamNameList.subclass = docParamNameListSub
# end class docParamNameListSub
@@ -432,6 +543,8 @@ def __init__(self, parametername=None):
class docParamNameSub(supermod.docParamName):
def __init__(self, direction=None, ref=None, mixedclass_=None, content_=None):
supermod.docParamName.__init__(self, mixedclass_, content_)
+
+
supermod.docParamName.subclass = docParamNameSub
# end class docParamNameSub
@@ -439,6 +552,8 @@ def __init__(self, direction=None, ref=None, mixedclass_=None, content_=None):
class docXRefSectTypeSub(supermod.docXRefSectType):
def __init__(self, id=None, xreftitle=None, xrefdescription=None):
supermod.docXRefSectType.__init__(self, id, xreftitle, xrefdescription)
+
+
supermod.docXRefSectType.subclass = docXRefSectTypeSub
# end class docXRefSectTypeSub
@@ -446,6 +561,8 @@ def __init__(self, id=None, xreftitle=None, xrefdescription=None):
class docCopyTypeSub(supermod.docCopyType):
def __init__(self, link=None, para=None, sect1=None, internal=None):
supermod.docCopyType.__init__(self, link, para, sect1, internal)
+
+
supermod.docCopyType.subclass = docCopyTypeSub
# end class docCopyTypeSub
@@ -453,9 +570,12 @@ def __init__(self, link=None, para=None, sect1=None, internal=None):
class docCharTypeSub(supermod.docCharType):
def __init__(self, char=None, valueOf_=''):
supermod.docCharType.__init__(self, char)
+
+
supermod.docCharType.subclass = docCharTypeSub
# end class docCharTypeSub
+
class docParaTypeSub(supermod.docParaType):
def __init__(self, char=None, valueOf_=''):
supermod.docParaType.__init__(self, char)
@@ -469,7 +589,7 @@ def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.TEXT_NODE:
obj_ = self.mixedclass_(MixedContainer.CategoryText,
- MixedContainer.TypeNone, '', child_.nodeValue)
+ MixedContainer.TypeNone, '', child_.nodeValue)
self.content.append(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
nodeName_ == "ref":
@@ -492,12 +612,9 @@ def buildChildren(self, child_, nodeName_):
# end class docParaTypeSub
-
def parse(inFilename):
doc = minidom.parse(inFilename)
rootNode = doc.documentElement
rootObj = supermod.DoxygenType.factory()
rootObj.build(rootNode)
return rootObj
-
-
diff --git a/gr-aistx/docs/doxygen/doxyxml/generated/compoundsuper.py b/gr-aistx/docs/doxygen/doxyxml/generated/compoundsuper.py
index 6255dda..40f548a 100644
--- a/gr-aistx/docs/doxygen/doxyxml/generated/compoundsuper.py
+++ b/gr-aistx/docs/doxygen/doxyxml/generated/compoundsuper.py
@@ -4,9 +4,9 @@
# Generated Thu Jun 11 18:44:25 2009 by generateDS.py.
#
+
import sys
-import getopt
-from string import lower as str_lower
+
from xml.dom import minidom
from xml.dom import Node
@@ -19,17 +19,21 @@
try:
from generatedssuper import GeneratedsSuper
-except ImportError, exp:
-    class GeneratedsSuper:
+except ImportError as exp:
+    class GeneratedsSuper(object):
def format_string(self, input_data, input_name=''):
return input_data
+
def format_integer(self, input_data, input_name=''):
return '%d' % input_data
+
def format_float(self, input_data, input_name=''):
return '%f' % input_data
+
def format_double(self, input_data, input_name=''):
return '%e' % input_data
+
def format_boolean(self, input_data, input_name=''):
return '%s' % input_data
@@ -41,9 +45,9 @@ def format_boolean(self, input_data, input_name=''):
## from IPython.Shell import IPShellEmbed
## args = ''
-## ipshell = IPShellEmbed(args,
+# ipshell = IPShellEmbed(args,
## banner = 'Dropping into IPython',
-## exit_msg = 'Leaving Interpreter, back to program.')
+# exit_msg = 'Leaving Interpreter, back to program.')
# Then use the following line where and when you want to drop into the
# IPython shell:
@@ -59,20 +63,23 @@ def format_boolean(self, input_data, input_name=''):
# Support/utility functions.
#
+
def showIndent(outfile, level):
for idx in range(level):
outfile.write(' ')
+
def quote_xml(inStr):
- s1 = (isinstance(inStr, basestring) and inStr or
+ s1 = (isinstance(inStr, str) and inStr or
'%s' % inStr)
    s1 = s1.replace('&', '&amp;')
    s1 = s1.replace('<', '&lt;')
    s1 = s1.replace('>', '&gt;')
return s1
+
def quote_attrib(inStr):
- s1 = (isinstance(inStr, basestring) and inStr or
+ s1 = (isinstance(inStr, str) and inStr or
'%s' % inStr)
    s1 = s1.replace('&', '&amp;')
    s1 = s1.replace('<', '&lt;')
@@ -86,6 +93,7 @@ def quote_attrib(inStr):
s1 = '"%s"' % s1
return s1
+
def quote_python(inStr):
s1 = inStr
if s1.find("'") == -1:
@@ -102,7 +110,7 @@ def quote_python(inStr):
return '"""%s"""' % s1
-class MixedContainer:
+class MixedContainer(object):
# Constants for category:
CategoryNone = 0
CategoryText = 1
@@ -117,26 +125,33 @@ class MixedContainer:
TypeDecimal = 5
TypeDouble = 6
TypeBoolean = 7
+
def __init__(self, category, content_type, name, value):
self.category = category
self.content_type = content_type
self.name = name
self.value = value
+
def getCategory(self):
return self.category
+
def getContenttype(self, content_type):
return self.content_type
+
def getValue(self):
return self.value
+
def getName(self):
return self.name
+
def export(self, outfile, level, name, namespace):
if self.category == MixedContainer.CategoryText:
outfile.write(self.value)
elif self.category == MixedContainer.CategorySimple:
self.exportSimple(outfile, level, name)
else: # category == MixedContainer.CategoryComplex
- self.value.export(outfile, level, namespace,name)
+ self.value.export(outfile, level, namespace, name)
+
def exportSimple(self, outfile, level, name):
if self.content_type == MixedContainer.TypeString:
            outfile.write('<%s>%s</%s>' % (self.name, self.value, self.name))
@@ -148,19 +163,20 @@ def exportSimple(self, outfile, level, name):
            outfile.write('<%s>%f</%s>' % (self.name, self.value, self.name))
elif self.content_type == MixedContainer.TypeDouble:
            outfile.write('<%s>%g</%s>' % (self.name, self.value, self.name))
+
def exportLiteral(self, outfile, level, name):
if self.category == MixedContainer.CategoryText:
showIndent(outfile, level)
- outfile.write('MixedContainer(%d, %d, "%s", "%s"),\n' % \
- (self.category, self.content_type, self.name, self.value))
+ outfile.write('MixedContainer(%d, %d, "%s", "%s"),\n' %
+ (self.category, self.content_type, self.name, self.value))
elif self.category == MixedContainer.CategorySimple:
showIndent(outfile, level)
- outfile.write('MixedContainer(%d, %d, "%s", "%s"),\n' % \
- (self.category, self.content_type, self.name, self.value))
+ outfile.write('MixedContainer(%d, %d, "%s", "%s"),\n' %
+ (self.category, self.content_type, self.name, self.value))
else: # category == MixedContainer.CategoryComplex
showIndent(outfile, level)
- outfile.write('MixedContainer(%d, %d, "%s",\n' % \
- (self.category, self.content_type, self.name,))
+ outfile.write('MixedContainer(%d, %d, "%s",\n' %
+ (self.category, self.content_type, self.name,))
self.value.exportLiteral(outfile, level + 1)
showIndent(outfile, level)
outfile.write(')\n')
@@ -171,6 +187,7 @@ def __init__(self, name='', data_type='', container=0):
self.name = name
self.data_type = data_type
self.container = container
+
def set_name(self, name): self.name = name
def get_name(self): return self.name
def set_data_type(self, data_type): self.data_type = data_type
@@ -186,9 +203,11 @@ def get_container(self): return self.container
class DoxygenType(GeneratedsSuper):
subclass = None
superclass = None
+
def __init__(self, version=None, compounddef=None):
self.version = version
self.compounddef = compounddef
+
def factory(*args_, **kwargs_):
if DoxygenType.subclass:
return DoxygenType.subclass(*args_, **kwargs_)
@@ -199,6 +218,7 @@ def get_compounddef(self): return self.compounddef
def set_compounddef(self, compounddef): self.compounddef = compounddef
def get_version(self): return self.version
def set_version(self, version): self.version = version
+
def export(self, outfile, level, namespace_='', name_='DoxygenType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
@@ -210,27 +230,34 @@ def export(self, outfile, level, namespace_='', name_='DoxygenType', namespacede
            outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write(' />\n')
+
def exportAttributes(self, outfile, level, namespace_='', name_='DoxygenType'):
outfile.write(' version=%s' % (quote_attrib(self.version), ))
+
def exportChildren(self, outfile, level, namespace_='', name_='DoxygenType'):
if self.compounddef:
- self.compounddef.export(outfile, level, namespace_, name_='compounddef')
+ self.compounddef.export(
+ outfile, level, namespace_, name_='compounddef')
+
def hasContent_(self):
if (
self.compounddef is not None
- ):
+ ):
return True
else:
return False
+
def exportLiteral(self, outfile, level, name_='DoxygenType'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
+
def exportLiteralAttributes(self, outfile, level, name_):
if self.version is not None:
showIndent(outfile, level)
outfile.write('version = "%s",\n' % (self.version,))
+
def exportLiteralChildren(self, outfile, level, name_):
if self.compounddef:
showIndent(outfile, level)
@@ -238,18 +265,21 @@ def exportLiteralChildren(self, outfile, level, name_):
self.compounddef.exportLiteral(outfile, level, name_='compounddef')
showIndent(outfile, level)
outfile.write('),\n')
+
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
+
def buildAttributes(self, attrs):
if attrs.get('version'):
self.version = attrs.get('version').value
+
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'compounddef':
+ nodeName_ == 'compounddef':
obj_ = compounddefType.factory()
obj_.build(child_)
self.set_compounddef(obj_)
@@ -259,6 +289,7 @@ def buildChildren(self, child_, nodeName_):
class compounddefType(GeneratedsSuper):
subclass = None
superclass = None
+
def __init__(self, kind=None, prot=None, id=None, compoundname=None, title=None, basecompoundref=None, derivedcompoundref=None, includes=None, includedby=None, incdepgraph=None, invincdepgraph=None, innerdir=None, innerfile=None, innerclass=None, innernamespace=None, innerpage=None, innergroup=None, templateparamlist=None, sectiondef=None, briefdescription=None, detaileddescription=None, inheritancegraph=None, collaborationgraph=None, programlisting=None, location=None, listofallmembers=None):
self.kind = kind
self.prot = prot
@@ -319,6 +350,7 @@ def __init__(self, kind=None, prot=None, id=None, compoundname=None, title=None,
self.programlisting = programlisting
self.location = location
self.listofallmembers = listofallmembers
+
def factory(*args_, **kwargs_):
if compounddefType.subclass:
return compounddefType.subclass(*args_, **kwargs_)
@@ -330,13 +362,23 @@ def set_compoundname(self, compoundname): self.compoundname = compoundname
def get_title(self): return self.title
def set_title(self, title): self.title = title
def get_basecompoundref(self): return self.basecompoundref
- def set_basecompoundref(self, basecompoundref): self.basecompoundref = basecompoundref
+ def set_basecompoundref(
+ self, basecompoundref): self.basecompoundref = basecompoundref
+
def add_basecompoundref(self, value): self.basecompoundref.append(value)
- def insert_basecompoundref(self, index, value): self.basecompoundref[index] = value
+ def insert_basecompoundref(
+ self, index, value): self.basecompoundref[index] = value
+
def get_derivedcompoundref(self): return self.derivedcompoundref
- def set_derivedcompoundref(self, derivedcompoundref): self.derivedcompoundref = derivedcompoundref
- def add_derivedcompoundref(self, value): self.derivedcompoundref.append(value)
- def insert_derivedcompoundref(self, index, value): self.derivedcompoundref[index] = value
+
+ def set_derivedcompoundref(
+ self, derivedcompoundref): self.derivedcompoundref = derivedcompoundref
+
+ def add_derivedcompoundref(
+ self, value): self.derivedcompoundref.append(value)
+ def insert_derivedcompoundref(
+ self, index, value): self.derivedcompoundref[index] = value
+
def get_includes(self): return self.includes
def set_includes(self, includes): self.includes = includes
def add_includes(self, value): self.includes.append(value)
@@ -348,7 +390,9 @@ def insert_includedby(self, index, value): self.includedby[index] = value
def get_incdepgraph(self): return self.incdepgraph
def set_incdepgraph(self, incdepgraph): self.incdepgraph = incdepgraph
def get_invincdepgraph(self): return self.invincdepgraph
- def set_invincdepgraph(self, invincdepgraph): self.invincdepgraph = invincdepgraph
+ def set_invincdepgraph(
+ self, invincdepgraph): self.invincdepgraph = invincdepgraph
+
def get_innerdir(self): return self.innerdir
def set_innerdir(self, innerdir): self.innerdir = innerdir
def add_innerdir(self, value): self.innerdir.append(value)
@@ -362,9 +406,13 @@ def set_innerclass(self, innerclass): self.innerclass = innerclass
def add_innerclass(self, value): self.innerclass.append(value)
def insert_innerclass(self, index, value): self.innerclass[index] = value
def get_innernamespace(self): return self.innernamespace
- def set_innernamespace(self, innernamespace): self.innernamespace = innernamespace
+ def set_innernamespace(
+ self, innernamespace): self.innernamespace = innernamespace
+
def add_innernamespace(self, value): self.innernamespace.append(value)
- def insert_innernamespace(self, index, value): self.innernamespace[index] = value
+ def insert_innernamespace(
+ self, index, value): self.innernamespace[index] = value
+
def get_innerpage(self): return self.innerpage
def set_innerpage(self, innerpage): self.innerpage = innerpage
def add_innerpage(self, value): self.innerpage.append(value)
@@ -374,35 +422,51 @@ def set_innergroup(self, innergroup): self.innergroup = innergroup
def add_innergroup(self, value): self.innergroup.append(value)
def insert_innergroup(self, index, value): self.innergroup[index] = value
def get_templateparamlist(self): return self.templateparamlist
- def set_templateparamlist(self, templateparamlist): self.templateparamlist = templateparamlist
+ def set_templateparamlist(
+ self, templateparamlist): self.templateparamlist = templateparamlist
+
def get_sectiondef(self): return self.sectiondef
def set_sectiondef(self, sectiondef): self.sectiondef = sectiondef
def add_sectiondef(self, value): self.sectiondef.append(value)
def insert_sectiondef(self, index, value): self.sectiondef[index] = value
def get_briefdescription(self): return self.briefdescription
- def set_briefdescription(self, briefdescription): self.briefdescription = briefdescription
+ def set_briefdescription(
+ self, briefdescription): self.briefdescription = briefdescription
+
def get_detaileddescription(self): return self.detaileddescription
- def set_detaileddescription(self, detaileddescription): self.detaileddescription = detaileddescription
+ def set_detaileddescription(
+ self, detaileddescription): self.detaileddescription = detaileddescription
+
def get_inheritancegraph(self): return self.inheritancegraph
- def set_inheritancegraph(self, inheritancegraph): self.inheritancegraph = inheritancegraph
+ def set_inheritancegraph(
+ self, inheritancegraph): self.inheritancegraph = inheritancegraph
+
def get_collaborationgraph(self): return self.collaborationgraph
- def set_collaborationgraph(self, collaborationgraph): self.collaborationgraph = collaborationgraph
+ def set_collaborationgraph(
+ self, collaborationgraph): self.collaborationgraph = collaborationgraph
+
def get_programlisting(self): return self.programlisting
- def set_programlisting(self, programlisting): self.programlisting = programlisting
+ def set_programlisting(
+ self, programlisting): self.programlisting = programlisting
+
def get_location(self): return self.location
def set_location(self, location): self.location = location
def get_listofallmembers(self): return self.listofallmembers
- def set_listofallmembers(self, listofallmembers): self.listofallmembers = listofallmembers
+ def set_listofallmembers(
+ self, listofallmembers): self.listofallmembers = listofallmembers
+
def get_kind(self): return self.kind
def set_kind(self, kind): self.kind = kind
def get_prot(self): return self.prot
def set_prot(self, prot): self.prot = prot
def get_id(self): return self.id
def set_id(self, id): self.id = id
+
def export(self, outfile, level, namespace_='', name_='compounddefType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
- self.exportAttributes(outfile, level, namespace_, name_='compounddefType')
+ self.exportAttributes(outfile, level, namespace_,
+ name_='compounddefType')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
@@ -410,32 +474,41 @@ def export(self, outfile, level, namespace_='', name_='compounddefType', namespa
            outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write(' />\n')
+
def exportAttributes(self, outfile, level, namespace_='', name_='compounddefType'):
if self.kind is not None:
outfile.write(' kind=%s' % (quote_attrib(self.kind), ))
if self.prot is not None:
outfile.write(' prot=%s' % (quote_attrib(self.prot), ))
if self.id is not None:
- outfile.write(' id=%s' % (self.format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), ))
+ outfile.write(' id=%s' % (self.format_string(quote_attrib(
+ self.id).encode(ExternalEncoding), input_name='id'), ))
+
def exportChildren(self, outfile, level, namespace_='', name_='compounddefType'):
if self.compoundname is not None:
showIndent(outfile, level)
-            outfile.write('<%scompoundname>%s</%scompoundname>\n' % (namespace_, self.format_string(quote_xml(self.compoundname).encode(ExternalEncoding), input_name='compoundname'), namespace_))
+            outfile.write('<%scompoundname>%s</%scompoundname>\n' % (namespace_, self.format_string(
+ quote_xml(self.compoundname).encode(ExternalEncoding), input_name='compoundname'), namespace_))
if self.title is not None:
showIndent(outfile, level)
-            outfile.write('<%stitle>%s</%stitle>\n' % (namespace_, self.format_string(quote_xml(self.title).encode(ExternalEncoding), input_name='title'), namespace_))
+            outfile.write('<%stitle>%s</%stitle>\n' % (namespace_, self.format_string(
+ quote_xml(self.title).encode(ExternalEncoding), input_name='title'), namespace_))
for basecompoundref_ in self.basecompoundref:
- basecompoundref_.export(outfile, level, namespace_, name_='basecompoundref')
+ basecompoundref_.export(
+ outfile, level, namespace_, name_='basecompoundref')
for derivedcompoundref_ in self.derivedcompoundref:
- derivedcompoundref_.export(outfile, level, namespace_, name_='derivedcompoundref')
+ derivedcompoundref_.export(
+ outfile, level, namespace_, name_='derivedcompoundref')
for includes_ in self.includes:
includes_.export(outfile, level, namespace_, name_='includes')
for includedby_ in self.includedby:
includedby_.export(outfile, level, namespace_, name_='includedby')
if self.incdepgraph:
- self.incdepgraph.export(outfile, level, namespace_, name_='incdepgraph')
+ self.incdepgraph.export(
+ outfile, level, namespace_, name_='incdepgraph')
if self.invincdepgraph:
- self.invincdepgraph.export(outfile, level, namespace_, name_='invincdepgraph')
+ self.invincdepgraph.export(
+ outfile, level, namespace_, name_='invincdepgraph')
for innerdir_ in self.innerdir:
innerdir_.export(outfile, level, namespace_, name_='innerdir')
for innerfile_ in self.innerfile:
@@ -443,29 +516,38 @@ def exportChildren(self, outfile, level, namespace_='', name_='compounddefType')
for innerclass_ in self.innerclass:
innerclass_.export(outfile, level, namespace_, name_='innerclass')
for innernamespace_ in self.innernamespace:
- innernamespace_.export(outfile, level, namespace_, name_='innernamespace')
+ innernamespace_.export(
+ outfile, level, namespace_, name_='innernamespace')
for innerpage_ in self.innerpage:
innerpage_.export(outfile, level, namespace_, name_='innerpage')
for innergroup_ in self.innergroup:
innergroup_.export(outfile, level, namespace_, name_='innergroup')
if self.templateparamlist:
- self.templateparamlist.export(outfile, level, namespace_, name_='templateparamlist')
+ self.templateparamlist.export(
+ outfile, level, namespace_, name_='templateparamlist')
for sectiondef_ in self.sectiondef:
sectiondef_.export(outfile, level, namespace_, name_='sectiondef')
if self.briefdescription:
- self.briefdescription.export(outfile, level, namespace_, name_='briefdescription')
+ self.briefdescription.export(
+ outfile, level, namespace_, name_='briefdescription')
if self.detaileddescription:
- self.detaileddescription.export(outfile, level, namespace_, name_='detaileddescription')
+ self.detaileddescription.export(
+ outfile, level, namespace_, name_='detaileddescription')
if self.inheritancegraph:
- self.inheritancegraph.export(outfile, level, namespace_, name_='inheritancegraph')
+ self.inheritancegraph.export(
+ outfile, level, namespace_, name_='inheritancegraph')
if self.collaborationgraph:
- self.collaborationgraph.export(outfile, level, namespace_, name_='collaborationgraph')
+ self.collaborationgraph.export(
+ outfile, level, namespace_, name_='collaborationgraph')
if self.programlisting:
- self.programlisting.export(outfile, level, namespace_, name_='programlisting')
+ self.programlisting.export(
+ outfile, level, namespace_, name_='programlisting')
if self.location:
self.location.export(outfile, level, namespace_, name_='location')
if self.listofallmembers:
- self.listofallmembers.export(outfile, level, namespace_, name_='listofallmembers')
+ self.listofallmembers.export(
+ outfile, level, namespace_, name_='listofallmembers')
+
def hasContent_(self):
if (
self.compoundname is not None or
@@ -491,15 +573,17 @@ def hasContent_(self):
self.programlisting is not None or
self.location is not None or
self.listofallmembers is not None
- ):
+ ):
return True
else:
return False
+
def exportLiteral(self, outfile, level, name_='compounddefType'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
+
def exportLiteralAttributes(self, outfile, level, name_):
if self.kind is not None:
showIndent(outfile, level)
@@ -510,9 +594,11 @@ def exportLiteralAttributes(self, outfile, level, name_):
if self.id is not None:
showIndent(outfile, level)
outfile.write('id = %s,\n' % (self.id,))
+
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
- outfile.write('compoundname=%s,\n' % quote_python(self.compoundname).encode(ExternalEncoding))
+ outfile.write('compoundname=%s,\n' % quote_python(
+ self.compoundname).encode(ExternalEncoding))
if self.title:
showIndent(outfile, level)
outfile.write('title=model_.xsd_string(\n')
@@ -525,7 +611,8 @@ def exportLiteralChildren(self, outfile, level, name_):
for basecompoundref in self.basecompoundref:
showIndent(outfile, level)
outfile.write('model_.basecompoundref(\n')
- basecompoundref.exportLiteral(outfile, level, name_='basecompoundref')
+ basecompoundref.exportLiteral(
+ outfile, level, name_='basecompoundref')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
@@ -537,7 +624,8 @@ def exportLiteralChildren(self, outfile, level, name_):
for derivedcompoundref in self.derivedcompoundref:
showIndent(outfile, level)
outfile.write('model_.derivedcompoundref(\n')
- derivedcompoundref.exportLiteral(outfile, level, name_='derivedcompoundref')
+ derivedcompoundref.exportLiteral(
+ outfile, level, name_='derivedcompoundref')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
@@ -576,7 +664,8 @@ def exportLiteralChildren(self, outfile, level, name_):
if self.invincdepgraph:
showIndent(outfile, level)
outfile.write('invincdepgraph=model_.graphType(\n')
- self.invincdepgraph.exportLiteral(outfile, level, name_='invincdepgraph')
+ self.invincdepgraph.exportLiteral(
+ outfile, level, name_='invincdepgraph')
showIndent(outfile, level)
outfile.write('),\n')
showIndent(outfile, level)
@@ -621,7 +710,8 @@ def exportLiteralChildren(self, outfile, level, name_):
for innernamespace in self.innernamespace:
showIndent(outfile, level)
outfile.write('model_.innernamespace(\n')
- innernamespace.exportLiteral(outfile, level, name_='innernamespace')
+ innernamespace.exportLiteral(
+ outfile, level, name_='innernamespace')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
@@ -654,7 +744,8 @@ def exportLiteralChildren(self, outfile, level, name_):
if self.templateparamlist:
showIndent(outfile, level)
outfile.write('templateparamlist=model_.templateparamlistType(\n')
- self.templateparamlist.exportLiteral(outfile, level, name_='templateparamlist')
+ self.templateparamlist.exportLiteral(
+ outfile, level, name_='templateparamlist')
showIndent(outfile, level)
outfile.write('),\n')
showIndent(outfile, level)
@@ -672,31 +763,36 @@ def exportLiteralChildren(self, outfile, level, name_):
if self.briefdescription:
showIndent(outfile, level)
outfile.write('briefdescription=model_.descriptionType(\n')
- self.briefdescription.exportLiteral(outfile, level, name_='briefdescription')
+ self.briefdescription.exportLiteral(
+ outfile, level, name_='briefdescription')
showIndent(outfile, level)
outfile.write('),\n')
if self.detaileddescription:
showIndent(outfile, level)
outfile.write('detaileddescription=model_.descriptionType(\n')
- self.detaileddescription.exportLiteral(outfile, level, name_='detaileddescription')
+ self.detaileddescription.exportLiteral(
+ outfile, level, name_='detaileddescription')
showIndent(outfile, level)
outfile.write('),\n')
if self.inheritancegraph:
showIndent(outfile, level)
outfile.write('inheritancegraph=model_.graphType(\n')
- self.inheritancegraph.exportLiteral(outfile, level, name_='inheritancegraph')
+ self.inheritancegraph.exportLiteral(
+ outfile, level, name_='inheritancegraph')
showIndent(outfile, level)
outfile.write('),\n')
if self.collaborationgraph:
showIndent(outfile, level)
outfile.write('collaborationgraph=model_.graphType(\n')
- self.collaborationgraph.exportLiteral(outfile, level, name_='collaborationgraph')
+ self.collaborationgraph.exportLiteral(
+ outfile, level, name_='collaborationgraph')
showIndent(outfile, level)
outfile.write('),\n')
if self.programlisting:
showIndent(outfile, level)
outfile.write('programlisting=model_.listingType(\n')
- self.programlisting.exportLiteral(outfile, level, name_='programlisting')
+ self.programlisting.exportLiteral(
+ outfile, level, name_='programlisting')
showIndent(outfile, level)
outfile.write('),\n')
if self.location:
@@ -708,15 +804,18 @@ def exportLiteralChildren(self, outfile, level, name_):
if self.listofallmembers:
showIndent(outfile, level)
outfile.write('listofallmembers=model_.listofallmembersType(\n')
- self.listofallmembers.exportLiteral(outfile, level, name_='listofallmembers')
+ self.listofallmembers.exportLiteral(
+ outfile, level, name_='listofallmembers')
showIndent(outfile, level)
outfile.write('),\n')
+
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
+
def buildAttributes(self, attrs):
if attrs.get('kind'):
self.kind = attrs.get('kind').value
@@ -724,120 +823,121 @@ def buildAttributes(self, attrs):
self.prot = attrs.get('prot').value
if attrs.get('id'):
self.id = attrs.get('id').value
+
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'compoundname':
+ nodeName_ == 'compoundname':
compoundname_ = ''
for text__content_ in child_.childNodes:
compoundname_ += text__content_.nodeValue
self.compoundname = compoundname_
elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'title':
+ nodeName_ == 'title':
obj_ = docTitleType.factory()
obj_.build(child_)
self.set_title(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'basecompoundref':
+ nodeName_ == 'basecompoundref':
obj_ = compoundRefType.factory()
obj_.build(child_)
self.basecompoundref.append(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'derivedcompoundref':
+ nodeName_ == 'derivedcompoundref':
obj_ = compoundRefType.factory()
obj_.build(child_)
self.derivedcompoundref.append(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'includes':
+ nodeName_ == 'includes':
obj_ = incType.factory()
obj_.build(child_)
self.includes.append(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'includedby':
+ nodeName_ == 'includedby':
obj_ = incType.factory()
obj_.build(child_)
self.includedby.append(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'incdepgraph':
+ nodeName_ == 'incdepgraph':
obj_ = graphType.factory()
obj_.build(child_)
self.set_incdepgraph(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'invincdepgraph':
+ nodeName_ == 'invincdepgraph':
obj_ = graphType.factory()
obj_.build(child_)
self.set_invincdepgraph(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'innerdir':
+ nodeName_ == 'innerdir':
obj_ = refType.factory()
obj_.build(child_)
self.innerdir.append(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'innerfile':
+ nodeName_ == 'innerfile':
obj_ = refType.factory()
obj_.build(child_)
self.innerfile.append(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'innerclass':
+ nodeName_ == 'innerclass':
obj_ = refType.factory()
obj_.build(child_)
self.innerclass.append(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'innernamespace':
+ nodeName_ == 'innernamespace':
obj_ = refType.factory()
obj_.build(child_)
self.innernamespace.append(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'innerpage':
+ nodeName_ == 'innerpage':
obj_ = refType.factory()
obj_.build(child_)
self.innerpage.append(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'innergroup':
+ nodeName_ == 'innergroup':
obj_ = refType.factory()
obj_.build(child_)
self.innergroup.append(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'templateparamlist':
+ nodeName_ == 'templateparamlist':
obj_ = templateparamlistType.factory()
obj_.build(child_)
self.set_templateparamlist(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'sectiondef':
+ nodeName_ == 'sectiondef':
obj_ = sectiondefType.factory()
obj_.build(child_)
self.sectiondef.append(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'briefdescription':
+ nodeName_ == 'briefdescription':
obj_ = descriptionType.factory()
obj_.build(child_)
self.set_briefdescription(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'detaileddescription':
+ nodeName_ == 'detaileddescription':
obj_ = descriptionType.factory()
obj_.build(child_)
self.set_detaileddescription(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'inheritancegraph':
+ nodeName_ == 'inheritancegraph':
obj_ = graphType.factory()
obj_.build(child_)
self.set_inheritancegraph(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'collaborationgraph':
+ nodeName_ == 'collaborationgraph':
obj_ = graphType.factory()
obj_.build(child_)
self.set_collaborationgraph(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'programlisting':
+ nodeName_ == 'programlisting':
obj_ = listingType.factory()
obj_.build(child_)
self.set_programlisting(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'location':
+ nodeName_ == 'location':
obj_ = locationType.factory()
obj_.build(child_)
self.set_location(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'listofallmembers':
+ nodeName_ == 'listofallmembers':
obj_ = listofallmembersType.factory()
obj_.build(child_)
self.set_listofallmembers(obj_)
@@ -847,11 +947,13 @@ def buildChildren(self, child_, nodeName_):
class listofallmembersType(GeneratedsSuper):
subclass = None
superclass = None
+
def __init__(self, member=None):
if member is None:
self.member = []
else:
self.member = member
+
def factory(*args_, **kwargs_):
if listofallmembersType.subclass:
return listofallmembersType.subclass(*args_, **kwargs_)
@@ -862,10 +964,12 @@ def get_member(self): return self.member
def set_member(self, member): self.member = member
def add_member(self, value): self.member.append(value)
def insert_member(self, index, value): self.member[index] = value
+
def export(self, outfile, level, namespace_='', name_='listofallmembersType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
- self.exportAttributes(outfile, level, namespace_, name_='listofallmembersType')
+ self.exportAttributes(outfile, level, namespace_,
+ name_='listofallmembersType')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
@@ -873,25 +977,31 @@ def export(self, outfile, level, namespace_='', name_='listofallmembersType', na
            outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write(' />\n')
+
def exportAttributes(self, outfile, level, namespace_='', name_='listofallmembersType'):
pass
+
def exportChildren(self, outfile, level, namespace_='', name_='listofallmembersType'):
for member_ in self.member:
member_.export(outfile, level, namespace_, name_='member')
+
def hasContent_(self):
if (
self.member is not None
- ):
+ ):
return True
else:
return False
+
def exportLiteral(self, outfile, level, name_='listofallmembersType'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
+
def exportLiteralAttributes(self, outfile, level, name_):
pass
+
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('member=[\n')
@@ -905,17 +1015,20 @@ def exportLiteralChildren(self, outfile, level, name_):
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
+
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
+
def buildAttributes(self, attrs):
pass
+
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'member':
+ nodeName_ == 'member':
obj_ = memberRefType.factory()
obj_.build(child_)
self.member.append(obj_)
@@ -925,6 +1038,7 @@ def buildChildren(self, child_, nodeName_):
class memberRefType(GeneratedsSuper):
subclass = None
superclass = None
+
def __init__(self, virt=None, prot=None, refid=None, ambiguityscope=None, scope=None, name=None):
self.virt = virt
self.prot = prot
@@ -932,6 +1046,7 @@ def __init__(self, virt=None, prot=None, refid=None, ambiguityscope=None, scope=
self.ambiguityscope = ambiguityscope
self.scope = scope
self.name = name
+
def factory(*args_, **kwargs_):
if memberRefType.subclass:
return memberRefType.subclass(*args_, **kwargs_)
@@ -949,11 +1064,15 @@ def set_prot(self, prot): self.prot = prot
def get_refid(self): return self.refid
def set_refid(self, refid): self.refid = refid
def get_ambiguityscope(self): return self.ambiguityscope
- def set_ambiguityscope(self, ambiguityscope): self.ambiguityscope = ambiguityscope
+
+ def set_ambiguityscope(
+ self, ambiguityscope): self.ambiguityscope = ambiguityscope
+
def export(self, outfile, level, namespace_='', name_='memberRefType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
- self.exportAttributes(outfile, level, namespace_, name_='memberRefType')
+ self.exportAttributes(outfile, level, namespace_,
+ name_='memberRefType')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
@@ -961,35 +1080,44 @@ def export(self, outfile, level, namespace_='', name_='memberRefType', namespace
            outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write(' />\n')
+
def exportAttributes(self, outfile, level, namespace_='', name_='memberRefType'):
if self.virt is not None:
outfile.write(' virt=%s' % (quote_attrib(self.virt), ))
if self.prot is not None:
outfile.write(' prot=%s' % (quote_attrib(self.prot), ))
if self.refid is not None:
- outfile.write(' refid=%s' % (self.format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), ))
+ outfile.write(' refid=%s' % (self.format_string(quote_attrib(
+ self.refid).encode(ExternalEncoding), input_name='refid'), ))
if self.ambiguityscope is not None:
- outfile.write(' ambiguityscope=%s' % (self.format_string(quote_attrib(self.ambiguityscope).encode(ExternalEncoding), input_name='ambiguityscope'), ))
+ outfile.write(' ambiguityscope=%s' % (self.format_string(quote_attrib(
+ self.ambiguityscope).encode(ExternalEncoding), input_name='ambiguityscope'), ))
+
def exportChildren(self, outfile, level, namespace_='', name_='memberRefType'):
if self.scope is not None:
showIndent(outfile, level)
-            outfile.write('<%sscope>%s</%sscope>\n' % (namespace_, self.format_string(quote_xml(self.scope).encode(ExternalEncoding), input_name='scope'), namespace_))
+            outfile.write('<%sscope>%s</%sscope>\n' % (namespace_, self.format_string(
+ quote_xml(self.scope).encode(ExternalEncoding), input_name='scope'), namespace_))
if self.name is not None:
showIndent(outfile, level)
-            outfile.write('<%sname>%s</%sname>\n' % (namespace_, self.format_string(quote_xml(self.name).encode(ExternalEncoding), input_name='name'), namespace_))
+            outfile.write('<%sname>%s</%sname>\n' % (namespace_, self.format_string(
+ quote_xml(self.name).encode(ExternalEncoding), input_name='name'), namespace_))
+
def hasContent_(self):
if (
self.scope is not None or
self.name is not None
- ):
+ ):
return True
else:
return False
+
def exportLiteral(self, outfile, level, name_='memberRefType'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
+
def exportLiteralAttributes(self, outfile, level, name_):
if self.virt is not None:
showIndent(outfile, level)
@@ -1003,17 +1131,22 @@ def exportLiteralAttributes(self, outfile, level, name_):
if self.ambiguityscope is not None:
showIndent(outfile, level)
outfile.write('ambiguityscope = %s,\n' % (self.ambiguityscope,))
+
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
- outfile.write('scope=%s,\n' % quote_python(self.scope).encode(ExternalEncoding))
+ outfile.write('scope=%s,\n' % quote_python(
+ self.scope).encode(ExternalEncoding))
showIndent(outfile, level)
- outfile.write('name=%s,\n' % quote_python(self.name).encode(ExternalEncoding))
+ outfile.write('name=%s,\n' % quote_python(
+ self.name).encode(ExternalEncoding))
+
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
+
def buildAttributes(self, attrs):
if attrs.get('virt'):
self.virt = attrs.get('virt').value
@@ -1023,15 +1156,16 @@ def buildAttributes(self, attrs):
self.refid = attrs.get('refid').value
if attrs.get('ambiguityscope'):
self.ambiguityscope = attrs.get('ambiguityscope').value
+
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'scope':
+ nodeName_ == 'scope':
scope_ = ''
for text__content_ in child_.childNodes:
scope_ += text__content_.nodeValue
self.scope = scope_
elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'name':
+ nodeName_ == 'name':
name_ = ''
for text__content_ in child_.childNodes:
name_ += text__content_.nodeValue
@@ -1042,8 +1176,10 @@ def buildChildren(self, child_, nodeName_):
class scope(GeneratedsSuper):
subclass = None
superclass = None
+
def __init__(self, valueOf_=''):
self.valueOf_ = valueOf_
+
def factory(*args_, **kwargs_):
if scope.subclass:
return scope.subclass(*args_, **kwargs_)
@@ -1052,6 +1188,7 @@ def factory(*args_, **kwargs_):
factory = staticmethod(factory)
def getValueOf_(self): return self.valueOf_
def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
+
def export(self, outfile, level, namespace_='', name_='scope', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
@@ -1063,33 +1200,40 @@ def export(self, outfile, level, namespace_='', name_='scope', namespacedef_='')
            outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write(' />\n')
+
def exportAttributes(self, outfile, level, namespace_='', name_='scope'):
pass
+
def exportChildren(self, outfile, level, namespace_='', name_='scope'):
- if self.valueOf_.find('![CDATA')>-1:
- value=quote_xml('%s' % self.valueOf_)
-            value=value.replace('![CDATA','<![CDATA[')
-            value=value.replace(']]',']]>')
+ if self.valueOf_.find('![CDATA') > -1:
+ value = quote_xml('%s' % self.valueOf_)
+            value = value.replace('![CDATA', '<![CDATA[')
+            value = value.replace(']]', ']]>')
outfile.write(value)
else:
outfile.write(quote_xml('%s' % self.valueOf_))
+
def hasContent_(self):
if (
self.valueOf_ is not None
- ):
+ ):
return True
else:
return False
+
def exportLiteral(self, outfile, level, name_='scope'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
+
def exportLiteralAttributes(self, outfile, level, name_):
pass
+
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
+
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
@@ -1097,21 +1241,25 @@ def build(self, node_):
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
+
def buildAttributes(self, attrs):
pass
+
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.TEXT_NODE:
self.valueOf_ += child_.nodeValue
elif child_.nodeType == Node.CDATA_SECTION_NODE:
- self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
+ self.valueOf_ += '![CDATA[' + child_.nodeValue + ']]'
# end class scope
class name(GeneratedsSuper):
subclass = None
superclass = None
+
def __init__(self, valueOf_=''):
self.valueOf_ = valueOf_
+
def factory(*args_, **kwargs_):
if name.subclass:
return name.subclass(*args_, **kwargs_)
@@ -1120,6 +1268,7 @@ def factory(*args_, **kwargs_):
factory = staticmethod(factory)
def getValueOf_(self): return self.valueOf_
def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
+
def export(self, outfile, level, namespace_='', name_='name', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
@@ -1131,33 +1280,40 @@ def export(self, outfile, level, namespace_='', name_='name', namespacedef_=''):
            outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write(' />\n')
+
def exportAttributes(self, outfile, level, namespace_='', name_='name'):
pass
+
def exportChildren(self, outfile, level, namespace_='', name_='name'):
- if self.valueOf_.find('![CDATA')>-1:
- value=quote_xml('%s' % self.valueOf_)
-            value=value.replace('![CDATA','<![CDATA[')
-            value=value.replace(']]',']]>')
+ if self.valueOf_.find('![CDATA') > -1:
+ value = quote_xml('%s' % self.valueOf_)
+            value = value.replace('![CDATA', '<![CDATA[')
+            value = value.replace(']]', ']]>')
outfile.write(value)
else:
outfile.write(quote_xml('%s' % self.valueOf_))
+
def hasContent_(self):
if (
self.valueOf_ is not None
- ):
+ ):
return True
else:
return False
+
def exportLiteral(self, outfile, level, name_='name'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
+
def exportLiteralAttributes(self, outfile, level, name_):
pass
+
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
+
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
@@ -1165,19 +1321,22 @@ def build(self, node_):
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
+
def buildAttributes(self, attrs):
pass
+
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.TEXT_NODE:
self.valueOf_ += child_.nodeValue
elif child_.nodeType == Node.CDATA_SECTION_NODE:
- self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
+ self.valueOf_ += '![CDATA[' + child_.nodeValue + ']]'
# end class name
class compoundRefType(GeneratedsSuper):
subclass = None
superclass = None
+
def __init__(self, virt=None, prot=None, refid=None, valueOf_='', mixedclass_=None, content_=None):
self.virt = virt
self.prot = prot
@@ -1190,6 +1349,7 @@ def __init__(self, virt=None, prot=None, refid=None, valueOf_='', mixedclass_=No
self.content_ = []
else:
self.content_ = content_
+
def factory(*args_, **kwargs_):
if compoundRefType.subclass:
return compoundRefType.subclass(*args_, **kwargs_)
@@ -1204,40 +1364,48 @@ def get_refid(self): return self.refid
def set_refid(self, refid): self.refid = refid
def getValueOf_(self): return self.valueOf_
def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
+
def export(self, outfile, level, namespace_='', name_='compoundRefType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
- self.exportAttributes(outfile, level, namespace_, name_='compoundRefType')
+ self.exportAttributes(outfile, level, namespace_,
+ name_='compoundRefType')
outfile.write('>')
self.exportChildren(outfile, level + 1, namespace_, name_)
        outfile.write('</%s%s>\n' % (namespace_, name_))
+
def exportAttributes(self, outfile, level, namespace_='', name_='compoundRefType'):
if self.virt is not None:
outfile.write(' virt=%s' % (quote_attrib(self.virt), ))
if self.prot is not None:
outfile.write(' prot=%s' % (quote_attrib(self.prot), ))
if self.refid is not None:
- outfile.write(' refid=%s' % (self.format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), ))
+ outfile.write(' refid=%s' % (self.format_string(quote_attrib(
+ self.refid).encode(ExternalEncoding), input_name='refid'), ))
+
def exportChildren(self, outfile, level, namespace_='', name_='compoundRefType'):
- if self.valueOf_.find('![CDATA')>-1:
- value=quote_xml('%s' % self.valueOf_)
-            value=value.replace('![CDATA','<![CDATA[')
-            value=value.replace(']]',']]>')
+ if self.valueOf_.find('![CDATA') > -1:
+ value = quote_xml('%s' % self.valueOf_)
+            value = value.replace('![CDATA', '<![CDATA[')
+            value = value.replace(']]', ']]>')
outfile.write(value)
else:
outfile.write(quote_xml('%s' % self.valueOf_))
+
def hasContent_(self):
if (
self.valueOf_ is not None
- ):
+ ):
return True
else:
return False
+
def exportLiteral(self, outfile, level, name_='compoundRefType'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
+
def exportLiteralAttributes(self, outfile, level, name_):
if self.virt is not None:
showIndent(outfile, level)
@@ -1248,9 +1416,11 @@ def exportLiteralAttributes(self, outfile, level, name_):
if self.refid is not None:
showIndent(outfile, level)
outfile.write('refid = %s,\n' % (self.refid,))
+
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
+
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
@@ -1258,6 +1428,7 @@ def build(self, node_):
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
+
def buildAttributes(self, attrs):
if attrs.get('virt'):
self.virt = attrs.get('virt').value
@@ -1265,21 +1436,23 @@ def buildAttributes(self, attrs):
self.prot = attrs.get('prot').value
if attrs.get('refid'):
self.refid = attrs.get('refid').value
+
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.TEXT_NODE:
obj_ = self.mixedclass_(MixedContainer.CategoryText,
- MixedContainer.TypeNone, '', child_.nodeValue)
+ MixedContainer.TypeNone, '', child_.nodeValue)
self.content_.append(obj_)
if child_.nodeType == Node.TEXT_NODE:
self.valueOf_ += child_.nodeValue
elif child_.nodeType == Node.CDATA_SECTION_NODE:
- self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
+ self.valueOf_ += '![CDATA[' + child_.nodeValue + ']]'
# end class compoundRefType
class reimplementType(GeneratedsSuper):
subclass = None
superclass = None
+
def __init__(self, refid=None, valueOf_='', mixedclass_=None, content_=None):
self.refid = refid
if mixedclass_ is None:
@@ -1290,6 +1463,7 @@ def __init__(self, refid=None, valueOf_='', mixedclass_=None, content_=None):
self.content_ = []
else:
self.content_ = content_
+
def factory(*args_, **kwargs_):
if reimplementType.subclass:
return reimplementType.subclass(*args_, **kwargs_)
@@ -1300,43 +1474,53 @@ def get_refid(self): return self.refid
def set_refid(self, refid): self.refid = refid
def getValueOf_(self): return self.valueOf_
def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
+
def export(self, outfile, level, namespace_='', name_='reimplementType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
- self.exportAttributes(outfile, level, namespace_, name_='reimplementType')
+ self.exportAttributes(outfile, level, namespace_,
+ name_='reimplementType')
outfile.write('>')
self.exportChildren(outfile, level + 1, namespace_, name_)
        outfile.write('</%s%s>\n' % (namespace_, name_))
+
def exportAttributes(self, outfile, level, namespace_='', name_='reimplementType'):
if self.refid is not None:
- outfile.write(' refid=%s' % (self.format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), ))
+ outfile.write(' refid=%s' % (self.format_string(quote_attrib(
+ self.refid).encode(ExternalEncoding), input_name='refid'), ))
+
def exportChildren(self, outfile, level, namespace_='', name_='reimplementType'):
- if self.valueOf_.find('![CDATA')>-1:
- value=quote_xml('%s' % self.valueOf_)
-            value=value.replace('![CDATA','<![CDATA[')
-            value=value.replace(']]',']]>')
+ if self.valueOf_.find('![CDATA') > -1:
+ value = quote_xml('%s' % self.valueOf_)
+            value = value.replace('![CDATA', '<![CDATA[')
+            value = value.replace(']]', ']]>')
outfile.write(value)
else:
outfile.write(quote_xml('%s' % self.valueOf_))
+
def hasContent_(self):
if (
self.valueOf_ is not None
- ):
+ ):
return True
else:
return False
+
def exportLiteral(self, outfile, level, name_='reimplementType'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
+
def exportLiteralAttributes(self, outfile, level, name_):
if self.refid is not None:
showIndent(outfile, level)
outfile.write('refid = %s,\n' % (self.refid,))
+
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
+
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
@@ -1344,24 +1528,27 @@ def build(self, node_):
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
+
def buildAttributes(self, attrs):
if attrs.get('refid'):
self.refid = attrs.get('refid').value
+
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.TEXT_NODE:
obj_ = self.mixedclass_(MixedContainer.CategoryText,
- MixedContainer.TypeNone, '', child_.nodeValue)
+ MixedContainer.TypeNone, '', child_.nodeValue)
self.content_.append(obj_)
if child_.nodeType == Node.TEXT_NODE:
self.valueOf_ += child_.nodeValue
elif child_.nodeType == Node.CDATA_SECTION_NODE:
- self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
+ self.valueOf_ += '![CDATA[' + child_.nodeValue + ']]'
# end class reimplementType
class incType(GeneratedsSuper):
subclass = None
superclass = None
+
def __init__(self, local=None, refid=None, valueOf_='', mixedclass_=None, content_=None):
self.local = local
self.refid = refid
@@ -1373,6 +1560,7 @@ def __init__(self, local=None, refid=None, valueOf_='', mixedclass_=None, conten
self.content_ = []
else:
self.content_ = content_
+
def factory(*args_, **kwargs_):
if incType.subclass:
return incType.subclass(*args_, **kwargs_)
@@ -1385,6 +1573,7 @@ def get_refid(self): return self.refid
def set_refid(self, refid): self.refid = refid
def getValueOf_(self): return self.valueOf_
def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
+
def export(self, outfile, level, namespace_='', name_='incType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
@@ -1392,31 +1581,37 @@ def export(self, outfile, level, namespace_='', name_='incType', namespacedef_='
outfile.write('>')
self.exportChildren(outfile, level + 1, namespace_, name_)
        outfile.write('</%s%s>\n' % (namespace_, name_))
+
def exportAttributes(self, outfile, level, namespace_='', name_='incType'):
if self.local is not None:
outfile.write(' local=%s' % (quote_attrib(self.local), ))
if self.refid is not None:
- outfile.write(' refid=%s' % (self.format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), ))
+ outfile.write(' refid=%s' % (self.format_string(quote_attrib(
+ self.refid).encode(ExternalEncoding), input_name='refid'), ))
+
def exportChildren(self, outfile, level, namespace_='', name_='incType'):
- if self.valueOf_.find('![CDATA')>-1:
- value=quote_xml('%s' % self.valueOf_)
-            value=value.replace('![CDATA','<![CDATA[')
-            value=value.replace(']]',']]>')
+ if self.valueOf_.find('![CDATA') > -1:
+ value = quote_xml('%s' % self.valueOf_)
+            value = value.replace('![CDATA', '<![CDATA[')
+            value = value.replace(']]', ']]>')
outfile.write(value)
else:
outfile.write(quote_xml('%s' % self.valueOf_))
+
def hasContent_(self):
if (
self.valueOf_ is not None
- ):
+ ):
return True
else:
return False
+
def exportLiteral(self, outfile, level, name_='incType'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
+
def exportLiteralAttributes(self, outfile, level, name_):
if self.local is not None:
showIndent(outfile, level)
@@ -1424,9 +1619,11 @@ def exportLiteralAttributes(self, outfile, level, name_):
if self.refid is not None:
showIndent(outfile, level)
outfile.write('refid = %s,\n' % (self.refid,))
+
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
+
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
@@ -1434,26 +1631,29 @@ def build(self, node_):
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
+
def buildAttributes(self, attrs):
if attrs.get('local'):
self.local = attrs.get('local').value
if attrs.get('refid'):
self.refid = attrs.get('refid').value
+
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.TEXT_NODE:
obj_ = self.mixedclass_(MixedContainer.CategoryText,
- MixedContainer.TypeNone, '', child_.nodeValue)
+ MixedContainer.TypeNone, '', child_.nodeValue)
self.content_.append(obj_)
if child_.nodeType == Node.TEXT_NODE:
self.valueOf_ += child_.nodeValue
elif child_.nodeType == Node.CDATA_SECTION_NODE:
- self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
+ self.valueOf_ += '![CDATA[' + child_.nodeValue + ']]'
# end class incType
class refType(GeneratedsSuper):
subclass = None
superclass = None
+
def __init__(self, prot=None, refid=None, valueOf_='', mixedclass_=None, content_=None):
self.prot = prot
self.refid = refid
@@ -1465,6 +1665,7 @@ def __init__(self, prot=None, refid=None, valueOf_='', mixedclass_=None, content
self.content_ = []
else:
self.content_ = content_
+
def factory(*args_, **kwargs_):
if refType.subclass:
return refType.subclass(*args_, **kwargs_)
@@ -1477,6 +1678,7 @@ def get_refid(self): return self.refid
def set_refid(self, refid): self.refid = refid
def getValueOf_(self): return self.valueOf_
def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
+
def export(self, outfile, level, namespace_='', name_='refType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
@@ -1484,31 +1686,37 @@ def export(self, outfile, level, namespace_='', name_='refType', namespacedef_='
outfile.write('>')
self.exportChildren(outfile, level + 1, namespace_, name_)
        outfile.write('</%s%s>\n' % (namespace_, name_))
+
def exportAttributes(self, outfile, level, namespace_='', name_='refType'):
if self.prot is not None:
outfile.write(' prot=%s' % (quote_attrib(self.prot), ))
if self.refid is not None:
- outfile.write(' refid=%s' % (self.format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), ))
+ outfile.write(' refid=%s' % (self.format_string(quote_attrib(
+ self.refid).encode(ExternalEncoding), input_name='refid'), ))
+
def exportChildren(self, outfile, level, namespace_='', name_='refType'):
- if self.valueOf_.find('![CDATA')>-1:
- value=quote_xml('%s' % self.valueOf_)
- value=value.replace('![CDATA','<![CDATA')
- value=value.replace(']]',']]>')
+ if self.valueOf_.find('![CDATA') > -1:
+ value = quote_xml('%s' % self.valueOf_)
+ value = value.replace('![CDATA', '<![CDATA')
+ value = value.replace(']]', ']]>')
outfile.write(value)
else:
outfile.write(quote_xml('%s' % self.valueOf_))
+
def hasContent_(self):
if (
self.valueOf_ is not None
- ):
+ ):
return True
else:
return False
+
def exportLiteral(self, outfile, level, name_='refType'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
+
def exportLiteralAttributes(self, outfile, level, name_):
if self.prot is not None:
showIndent(outfile, level)
@@ -1516,9 +1724,11 @@ def exportLiteralAttributes(self, outfile, level, name_):
if self.refid is not None:
showIndent(outfile, level)
outfile.write('refid = %s,\n' % (self.refid,))
+
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
+
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
@@ -1526,26 +1736,29 @@ def build(self, node_):
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
+
def buildAttributes(self, attrs):
if attrs.get('prot'):
self.prot = attrs.get('prot').value
if attrs.get('refid'):
self.refid = attrs.get('refid').value
+
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.TEXT_NODE:
obj_ = self.mixedclass_(MixedContainer.CategoryText,
- MixedContainer.TypeNone, '', child_.nodeValue)
+ MixedContainer.TypeNone, '', child_.nodeValue)
self.content_.append(obj_)
if child_.nodeType == Node.TEXT_NODE:
self.valueOf_ += child_.nodeValue
elif child_.nodeType == Node.CDATA_SECTION_NODE:
- self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
+ self.valueOf_ += '![CDATA[' + child_.nodeValue + ']]'
# end class refType
class refTextType(GeneratedsSuper):
subclass = None
superclass = None
+
def __init__(self, refid=None, kindref=None, external=None, valueOf_='', mixedclass_=None, content_=None):
self.refid = refid
self.kindref = kindref
@@ -1558,6 +1771,7 @@ def __init__(self, refid=None, kindref=None, external=None, valueOf_='', mixedcl
self.content_ = []
else:
self.content_ = content_
+
def factory(*args_, **kwargs_):
if refTextType.subclass:
return refTextType.subclass(*args_, **kwargs_)
@@ -1572,6 +1786,7 @@ def get_external(self): return self.external
def set_external(self, external): self.external = external
def getValueOf_(self): return self.valueOf_
def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
+
def export(self, outfile, level, namespace_='', name_='refTextType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
@@ -1579,33 +1794,40 @@ def export(self, outfile, level, namespace_='', name_='refTextType', namespacede
outfile.write('>')
self.exportChildren(outfile, level + 1, namespace_, name_)
outfile.write('</%s%s>\n' % (namespace_, name_))
+
def exportAttributes(self, outfile, level, namespace_='', name_='refTextType'):
if self.refid is not None:
- outfile.write(' refid=%s' % (self.format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), ))
+ outfile.write(' refid=%s' % (self.format_string(quote_attrib(
+ self.refid).encode(ExternalEncoding), input_name='refid'), ))
if self.kindref is not None:
outfile.write(' kindref=%s' % (quote_attrib(self.kindref), ))
if self.external is not None:
- outfile.write(' external=%s' % (self.format_string(quote_attrib(self.external).encode(ExternalEncoding), input_name='external'), ))
+ outfile.write(' external=%s' % (self.format_string(quote_attrib(
+ self.external).encode(ExternalEncoding), input_name='external'), ))
+
def exportChildren(self, outfile, level, namespace_='', name_='refTextType'):
- if self.valueOf_.find('![CDATA')>-1:
- value=quote_xml('%s' % self.valueOf_)
- value=value.replace('![CDATA','<![CDATA')
- value=value.replace(']]',']]>')
+ if self.valueOf_.find('![CDATA') > -1:
+ value = quote_xml('%s' % self.valueOf_)
+ value = value.replace('![CDATA', '<![CDATA')
+ value = value.replace(']]', ']]>')
outfile.write(value)
else:
outfile.write(quote_xml('%s' % self.valueOf_))
+
def hasContent_(self):
if (
self.valueOf_ is not None
- ):
+ ):
return True
else:
return False
+
def exportLiteral(self, outfile, level, name_='refTextType'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
+
def exportLiteralAttributes(self, outfile, level, name_):
if self.refid is not None:
showIndent(outfile, level)
@@ -1616,9 +1838,11 @@ def exportLiteralAttributes(self, outfile, level, name_):
if self.external is not None:
showIndent(outfile, level)
outfile.write('external = %s,\n' % (self.external,))
+
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
+
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
@@ -1626,6 +1850,7 @@ def build(self, node_):
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
+
def buildAttributes(self, attrs):
if attrs.get('refid'):
self.refid = attrs.get('refid').value
@@ -1633,21 +1858,23 @@ def buildAttributes(self, attrs):
self.kindref = attrs.get('kindref').value
if attrs.get('external'):
self.external = attrs.get('external').value
+
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.TEXT_NODE:
obj_ = self.mixedclass_(MixedContainer.CategoryText,
- MixedContainer.TypeNone, '', child_.nodeValue)
+ MixedContainer.TypeNone, '', child_.nodeValue)
self.content_.append(obj_)
if child_.nodeType == Node.TEXT_NODE:
self.valueOf_ += child_.nodeValue
elif child_.nodeType == Node.CDATA_SECTION_NODE:
- self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
+ self.valueOf_ += '![CDATA[' + child_.nodeValue + ']]'
# end class refTextType
class sectiondefType(GeneratedsSuper):
subclass = None
superclass = None
+
def __init__(self, kind=None, header=None, description=None, memberdef=None):
self.kind = kind
self.header = header
@@ -1656,6 +1883,7 @@ def __init__(self, kind=None, header=None, description=None, memberdef=None):
self.memberdef = []
else:
self.memberdef = memberdef
+
def factory(*args_, **kwargs_):
if sectiondefType.subclass:
return sectiondefType.subclass(*args_, **kwargs_)
@@ -1672,10 +1900,12 @@ def add_memberdef(self, value): self.memberdef.append(value)
def insert_memberdef(self, index, value): self.memberdef[index] = value
def get_kind(self): return self.kind
def set_kind(self, kind): self.kind = kind
+
def export(self, outfile, level, namespace_='', name_='sectiondefType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
- self.exportAttributes(outfile, level, namespace_, name_='sectiondefType')
+ self.exportAttributes(outfile, level, namespace_,
+ name_='sectiondefType')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
@@ -1683,38 +1913,47 @@ def export(self, outfile, level, namespace_='', name_='sectiondefType', namespac
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write(' />\n')
+
def exportAttributes(self, outfile, level, namespace_='', name_='sectiondefType'):
if self.kind is not None:
outfile.write(' kind=%s' % (quote_attrib(self.kind), ))
+
def exportChildren(self, outfile, level, namespace_='', name_='sectiondefType'):
if self.header is not None:
showIndent(outfile, level)
- outfile.write('<%sheader>%s</%sheader>\n' % (namespace_, self.format_string(quote_xml(self.header).encode(ExternalEncoding), input_name='header'), namespace_))
+ outfile.write('<%sheader>%s</%sheader>\n' % (namespace_, self.format_string(
+ quote_xml(self.header).encode(ExternalEncoding), input_name='header'), namespace_))
if self.description:
- self.description.export(outfile, level, namespace_, name_='description')
+ self.description.export(
+ outfile, level, namespace_, name_='description')
for memberdef_ in self.memberdef:
memberdef_.export(outfile, level, namespace_, name_='memberdef')
+
def hasContent_(self):
if (
self.header is not None or
self.description is not None or
self.memberdef is not None
- ):
+ ):
return True
else:
return False
+
def exportLiteral(self, outfile, level, name_='sectiondefType'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
+
def exportLiteralAttributes(self, outfile, level, name_):
if self.kind is not None:
showIndent(outfile, level)
outfile.write('kind = "%s",\n' % (self.kind,))
+
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
- outfile.write('header=%s,\n' % quote_python(self.header).encode(ExternalEncoding))
+ outfile.write('header=%s,\n' % quote_python(
+ self.header).encode(ExternalEncoding))
if self.description:
showIndent(outfile, level)
outfile.write('description=model_.descriptionType(\n')
@@ -1733,29 +1972,32 @@ def exportLiteralChildren(self, outfile, level, name_):
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
+
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
+
def buildAttributes(self, attrs):
if attrs.get('kind'):
self.kind = attrs.get('kind').value
+
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'header':
+ nodeName_ == 'header':
header_ = ''
for text__content_ in child_.childNodes:
header_ += text__content_.nodeValue
self.header = header_
elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'description':
+ nodeName_ == 'description':
obj_ = descriptionType.factory()
obj_.build(child_)
self.set_description(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'memberdef':
+ nodeName_ == 'memberdef':
obj_ = memberdefType.factory()
obj_.build(child_)
self.memberdef.append(obj_)
@@ -1765,6 +2007,7 @@ def buildChildren(self, child_, nodeName_):
class memberdefType(GeneratedsSuper):
subclass = None
superclass = None
+
def __init__(self, initonly=None, kind=None, volatile=None, const=None, raisexx=None, virt=None, readable=None, prot=None, explicit=None, new=None, final=None, writable=None, add=None, static=None, remove=None, sealed=None, mutable=None, gettable=None, inline=None, settable=None, id=None, templateparamlist=None, type_=None, definition=None, argsstring=None, name=None, read=None, write=None, bitfield=None, reimplements=None, reimplementedby=None, param=None, enumvalue=None, initializer=None, exceptions=None, briefdescription=None, detaileddescription=None, inbodydescription=None, location=None, references=None, referencedby=None):
self.initonly = initonly
self.kind = kind
@@ -1825,6 +2068,7 @@ def __init__(self, initonly=None, kind=None, volatile=None, const=None, raisexx=
self.referencedby = []
else:
self.referencedby = referencedby
+
def factory(*args_, **kwargs_):
if memberdefType.subclass:
return memberdefType.subclass(*args_, **kwargs_)
@@ -1832,7 +2076,9 @@ def factory(*args_, **kwargs_):
return memberdefType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_templateparamlist(self): return self.templateparamlist
- def set_templateparamlist(self, templateparamlist): self.templateparamlist = templateparamlist
+ def set_templateparamlist(
+ self, templateparamlist): self.templateparamlist = templateparamlist
+
def get_type(self): return self.type_
def set_type(self, type_): self.type_ = type_
def get_definition(self): return self.definition
@@ -1850,11 +2096,17 @@ def set_bitfield(self, bitfield): self.bitfield = bitfield
def get_reimplements(self): return self.reimplements
def set_reimplements(self, reimplements): self.reimplements = reimplements
def add_reimplements(self, value): self.reimplements.append(value)
- def insert_reimplements(self, index, value): self.reimplements[index] = value
+ def insert_reimplements(
+ self, index, value): self.reimplements[index] = value
+
def get_reimplementedby(self): return self.reimplementedby
- def set_reimplementedby(self, reimplementedby): self.reimplementedby = reimplementedby
+ def set_reimplementedby(
+ self, reimplementedby): self.reimplementedby = reimplementedby
+
def add_reimplementedby(self, value): self.reimplementedby.append(value)
- def insert_reimplementedby(self, index, value): self.reimplementedby[index] = value
+ def insert_reimplementedby(
+ self, index, value): self.reimplementedby[index] = value
+
def get_param(self): return self.param
def set_param(self, param): self.param = param
def add_param(self, value): self.param.append(value)
@@ -1868,11 +2120,17 @@ def set_initializer(self, initializer): self.initializer = initializer
def get_exceptions(self): return self.exceptions
def set_exceptions(self, exceptions): self.exceptions = exceptions
def get_briefdescription(self): return self.briefdescription
- def set_briefdescription(self, briefdescription): self.briefdescription = briefdescription
+ def set_briefdescription(
+ self, briefdescription): self.briefdescription = briefdescription
+
def get_detaileddescription(self): return self.detaileddescription
- def set_detaileddescription(self, detaileddescription): self.detaileddescription = detaileddescription
+ def set_detaileddescription(
+ self, detaileddescription): self.detaileddescription = detaileddescription
+
def get_inbodydescription(self): return self.inbodydescription
- def set_inbodydescription(self, inbodydescription): self.inbodydescription = inbodydescription
+ def set_inbodydescription(
+ self, inbodydescription): self.inbodydescription = inbodydescription
+
def get_location(self): return self.location
def set_location(self, location): self.location = location
def get_references(self): return self.references
@@ -1882,7 +2140,9 @@ def insert_references(self, index, value): self.references[index] = value
def get_referencedby(self): return self.referencedby
def set_referencedby(self, referencedby): self.referencedby = referencedby
def add_referencedby(self, value): self.referencedby.append(value)
- def insert_referencedby(self, index, value): self.referencedby[index] = value
+ def insert_referencedby(
+ self, index, value): self.referencedby[index] = value
+
def get_initonly(self): return self.initonly
def set_initonly(self, initonly): self.initonly = initonly
def get_kind(self): return self.kind
@@ -1925,10 +2185,12 @@ def get_settable(self): return self.settable
def set_settable(self, settable): self.settable = settable
def get_id(self): return self.id
def set_id(self, id): self.id = id
+
def export(self, outfile, level, namespace_='', name_='memberdefType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
- self.exportAttributes(outfile, level, namespace_, name_='memberdefType')
+ self.exportAttributes(outfile, level, namespace_,
+ name_='memberdefType')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
@@ -1936,6 +2198,7 @@ def export(self, outfile, level, namespace_='', name_='memberdefType', namespace
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write(' />\n')
+
def exportAttributes(self, outfile, level, namespace_='', name_='memberdefType'):
if self.initonly is not None:
outfile.write(' initonly=%s' % (quote_attrib(self.initonly), ))
@@ -1978,54 +2241,73 @@ def exportAttributes(self, outfile, level, namespace_='', name_='memberdefType')
if self.settable is not None:
outfile.write(' settable=%s' % (quote_attrib(self.settable), ))
if self.id is not None:
- outfile.write(' id=%s' % (self.format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), ))
+ outfile.write(' id=%s' % (self.format_string(quote_attrib(
+ self.id).encode(ExternalEncoding), input_name='id'), ))
+
def exportChildren(self, outfile, level, namespace_='', name_='memberdefType'):
if self.templateparamlist:
- self.templateparamlist.export(outfile, level, namespace_, name_='templateparamlist')
+ self.templateparamlist.export(
+ outfile, level, namespace_, name_='templateparamlist')
if self.type_:
self.type_.export(outfile, level, namespace_, name_='type')
if self.definition is not None:
showIndent(outfile, level)
- outfile.write('<%sdefinition>%s</%sdefinition>\n' % (namespace_, self.format_string(quote_xml(self.definition).encode(ExternalEncoding), input_name='definition'), namespace_))
+ outfile.write('<%sdefinition>%s</%sdefinition>\n' % (namespace_, self.format_string(
+ quote_xml(self.definition).encode(ExternalEncoding), input_name='definition'), namespace_))
if self.argsstring is not None:
showIndent(outfile, level)
- outfile.write('<%sargsstring>%s</%sargsstring>\n' % (namespace_, self.format_string(quote_xml(self.argsstring).encode(ExternalEncoding), input_name='argsstring'), namespace_))
+ outfile.write('<%sargsstring>%s</%sargsstring>\n' % (namespace_, self.format_string(
+ quote_xml(self.argsstring).encode(ExternalEncoding), input_name='argsstring'), namespace_))
if self.name is not None:
showIndent(outfile, level)
- outfile.write('<%sname>%s</%sname>\n' % (namespace_, self.format_string(quote_xml(self.name).encode(ExternalEncoding), input_name='name'), namespace_))
+ outfile.write('<%sname>%s</%sname>\n' % (namespace_, self.format_string(
+ quote_xml(self.name).encode(ExternalEncoding), input_name='name'), namespace_))
if self.read is not None:
showIndent(outfile, level)
- outfile.write('<%sread>%s</%sread>\n' % (namespace_, self.format_string(quote_xml(self.read).encode(ExternalEncoding), input_name='read'), namespace_))
+ outfile.write('<%sread>%s</%sread>\n' % (namespace_, self.format_string(
+ quote_xml(self.read).encode(ExternalEncoding), input_name='read'), namespace_))
if self.write is not None:
showIndent(outfile, level)
- outfile.write('<%swrite>%s</%swrite>\n' % (namespace_, self.format_string(quote_xml(self.write).encode(ExternalEncoding), input_name='write'), namespace_))
+ outfile.write('<%swrite>%s</%swrite>\n' % (namespace_, self.format_string(
+ quote_xml(self.write).encode(ExternalEncoding), input_name='write'), namespace_))
if self.bitfield is not None:
showIndent(outfile, level)
- outfile.write('<%sbitfield>%s</%sbitfield>\n' % (namespace_, self.format_string(quote_xml(self.bitfield).encode(ExternalEncoding), input_name='bitfield'), namespace_))
+ outfile.write('<%sbitfield>%s</%sbitfield>\n' % (namespace_, self.format_string(
+ quote_xml(self.bitfield).encode(ExternalEncoding), input_name='bitfield'), namespace_))
for reimplements_ in self.reimplements:
- reimplements_.export(outfile, level, namespace_, name_='reimplements')
+ reimplements_.export(
+ outfile, level, namespace_, name_='reimplements')
for reimplementedby_ in self.reimplementedby:
- reimplementedby_.export(outfile, level, namespace_, name_='reimplementedby')
+ reimplementedby_.export(
+ outfile, level, namespace_, name_='reimplementedby')
for param_ in self.param:
param_.export(outfile, level, namespace_, name_='param')
for enumvalue_ in self.enumvalue:
enumvalue_.export(outfile, level, namespace_, name_='enumvalue')
if self.initializer:
- self.initializer.export(outfile, level, namespace_, name_='initializer')
+ self.initializer.export(
+ outfile, level, namespace_, name_='initializer')
if self.exceptions:
- self.exceptions.export(outfile, level, namespace_, name_='exceptions')
+ self.exceptions.export(
+ outfile, level, namespace_, name_='exceptions')
if self.briefdescription:
- self.briefdescription.export(outfile, level, namespace_, name_='briefdescription')
+ self.briefdescription.export(
+ outfile, level, namespace_, name_='briefdescription')
if self.detaileddescription:
- self.detaileddescription.export(outfile, level, namespace_, name_='detaileddescription')
+ self.detaileddescription.export(
+ outfile, level, namespace_, name_='detaileddescription')
if self.inbodydescription:
- self.inbodydescription.export(outfile, level, namespace_, name_='inbodydescription')
+ self.inbodydescription.export(
+ outfile, level, namespace_, name_='inbodydescription')
if self.location:
- self.location.export(outfile, level, namespace_, name_='location', )
+ self.location.export(
+ outfile, level, namespace_, name_='location', )
for references_ in self.references:
references_.export(outfile, level, namespace_, name_='references')
for referencedby_ in self.referencedby:
- referencedby_.export(outfile, level, namespace_, name_='referencedby')
+ referencedby_.export(
+ outfile, level, namespace_, name_='referencedby')
+
def hasContent_(self):
if (
self.templateparamlist is not None or
@@ -2048,15 +2330,17 @@ def hasContent_(self):
self.location is not None or
self.references is not None or
self.referencedby is not None
- ):
+ ):
return True
else:
return False
+
def exportLiteral(self, outfile, level, name_='memberdefType'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
+
def exportLiteralAttributes(self, outfile, level, name_):
if self.initonly is not None:
showIndent(outfile, level)
@@ -2121,11 +2405,13 @@ def exportLiteralAttributes(self, outfile, level, name_):
if self.id is not None:
showIndent(outfile, level)
outfile.write('id = %s,\n' % (self.id,))
+
def exportLiteralChildren(self, outfile, level, name_):
if self.templateparamlist:
showIndent(outfile, level)
outfile.write('templateparamlist=model_.templateparamlistType(\n')
- self.templateparamlist.exportLiteral(outfile, level, name_='templateparamlist')
+ self.templateparamlist.exportLiteral(
+ outfile, level, name_='templateparamlist')
showIndent(outfile, level)
outfile.write('),\n')
if self.type_:
@@ -2135,17 +2421,23 @@ def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('),\n')
showIndent(outfile, level)
- outfile.write('definition=%s,\n' % quote_python(self.definition).encode(ExternalEncoding))
+ outfile.write('definition=%s,\n' % quote_python(
+ self.definition).encode(ExternalEncoding))
showIndent(outfile, level)
- outfile.write('argsstring=%s,\n' % quote_python(self.argsstring).encode(ExternalEncoding))
+ outfile.write('argsstring=%s,\n' % quote_python(
+ self.argsstring).encode(ExternalEncoding))
showIndent(outfile, level)
- outfile.write('name=%s,\n' % quote_python(self.name).encode(ExternalEncoding))
+ outfile.write('name=%s,\n' % quote_python(
+ self.name).encode(ExternalEncoding))
showIndent(outfile, level)
- outfile.write('read=%s,\n' % quote_python(self.read).encode(ExternalEncoding))
+ outfile.write('read=%s,\n' % quote_python(
+ self.read).encode(ExternalEncoding))
showIndent(outfile, level)
- outfile.write('write=%s,\n' % quote_python(self.write).encode(ExternalEncoding))
+ outfile.write('write=%s,\n' % quote_python(
+ self.write).encode(ExternalEncoding))
showIndent(outfile, level)
- outfile.write('bitfield=%s,\n' % quote_python(self.bitfield).encode(ExternalEncoding))
+ outfile.write('bitfield=%s,\n' % quote_python(
+ self.bitfield).encode(ExternalEncoding))
showIndent(outfile, level)
outfile.write('reimplements=[\n')
level += 1
@@ -2164,7 +2456,8 @@ def exportLiteralChildren(self, outfile, level, name_):
for reimplementedby in self.reimplementedby:
showIndent(outfile, level)
outfile.write('model_.reimplementedby(\n')
- reimplementedby.exportLiteral(outfile, level, name_='reimplementedby')
+ reimplementedby.exportLiteral(
+ outfile, level, name_='reimplementedby')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
@@ -2209,19 +2502,22 @@ def exportLiteralChildren(self, outfile, level, name_):
if self.briefdescription:
showIndent(outfile, level)
outfile.write('briefdescription=model_.descriptionType(\n')
- self.briefdescription.exportLiteral(outfile, level, name_='briefdescription')
+ self.briefdescription.exportLiteral(
+ outfile, level, name_='briefdescription')
showIndent(outfile, level)
outfile.write('),\n')
if self.detaileddescription:
showIndent(outfile, level)
outfile.write('detaileddescription=model_.descriptionType(\n')
- self.detaileddescription.exportLiteral(outfile, level, name_='detaileddescription')
+ self.detaileddescription.exportLiteral(
+ outfile, level, name_='detaileddescription')
showIndent(outfile, level)
outfile.write('),\n')
if self.inbodydescription:
showIndent(outfile, level)
outfile.write('inbodydescription=model_.descriptionType(\n')
- self.inbodydescription.exportLiteral(outfile, level, name_='inbodydescription')
+ self.inbodydescription.exportLiteral(
+ outfile, level, name_='inbodydescription')
showIndent(outfile, level)
outfile.write('),\n')
if self.location:
@@ -2254,12 +2550,14 @@ def exportLiteralChildren(self, outfile, level, name_):
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
+
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
+
def buildAttributes(self, attrs):
if attrs.get('initonly'):
self.initonly = attrs.get('initonly').value
@@ -2303,110 +2601,111 @@ def buildAttributes(self, attrs):
self.settable = attrs.get('settable').value
if attrs.get('id'):
self.id = attrs.get('id').value
+
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'templateparamlist':
+ nodeName_ == 'templateparamlist':
obj_ = templateparamlistType.factory()
obj_.build(child_)
self.set_templateparamlist(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'type':
+ nodeName_ == 'type':
obj_ = linkedTextType.factory()
obj_.build(child_)
self.set_type(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'definition':
+ nodeName_ == 'definition':
definition_ = ''
for text__content_ in child_.childNodes:
definition_ += text__content_.nodeValue
self.definition = definition_
elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'argsstring':
+ nodeName_ == 'argsstring':
argsstring_ = ''
for text__content_ in child_.childNodes:
argsstring_ += text__content_.nodeValue
self.argsstring = argsstring_
elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'name':
+ nodeName_ == 'name':
name_ = ''
for text__content_ in child_.childNodes:
name_ += text__content_.nodeValue
self.name = name_
elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'read':
+ nodeName_ == 'read':
read_ = ''
for text__content_ in child_.childNodes:
read_ += text__content_.nodeValue
self.read = read_
elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'write':
+ nodeName_ == 'write':
write_ = ''
for text__content_ in child_.childNodes:
write_ += text__content_.nodeValue
self.write = write_
elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'bitfield':
+ nodeName_ == 'bitfield':
bitfield_ = ''
for text__content_ in child_.childNodes:
bitfield_ += text__content_.nodeValue
self.bitfield = bitfield_
elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'reimplements':
+ nodeName_ == 'reimplements':
obj_ = reimplementType.factory()
obj_.build(child_)
self.reimplements.append(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'reimplementedby':
+ nodeName_ == 'reimplementedby':
obj_ = reimplementType.factory()
obj_.build(child_)
self.reimplementedby.append(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'param':
+ nodeName_ == 'param':
obj_ = paramType.factory()
obj_.build(child_)
self.param.append(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'enumvalue':
+ nodeName_ == 'enumvalue':
obj_ = enumvalueType.factory()
obj_.build(child_)
self.enumvalue.append(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'initializer':
+ nodeName_ == 'initializer':
obj_ = linkedTextType.factory()
obj_.build(child_)
self.set_initializer(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'exceptions':
+ nodeName_ == 'exceptions':
obj_ = linkedTextType.factory()
obj_.build(child_)
self.set_exceptions(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'briefdescription':
+ nodeName_ == 'briefdescription':
obj_ = descriptionType.factory()
obj_.build(child_)
self.set_briefdescription(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'detaileddescription':
+ nodeName_ == 'detaileddescription':
obj_ = descriptionType.factory()
obj_.build(child_)
self.set_detaileddescription(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'inbodydescription':
+ nodeName_ == 'inbodydescription':
obj_ = descriptionType.factory()
obj_.build(child_)
self.set_inbodydescription(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'location':
+ nodeName_ == 'location':
obj_ = locationType.factory()
obj_.build(child_)
self.set_location(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'references':
+ nodeName_ == 'references':
obj_ = referenceType.factory()
obj_.build(child_)
self.references.append(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'referencedby':
+ nodeName_ == 'referencedby':
obj_ = referenceType.factory()
obj_.build(child_)
self.referencedby.append(obj_)
@@ -2416,8 +2715,10 @@ def buildChildren(self, child_, nodeName_):
class definition(GeneratedsSuper):
subclass = None
superclass = None
+
def __init__(self, valueOf_=''):
self.valueOf_ = valueOf_
+
def factory(*args_, **kwargs_):
if definition.subclass:
return definition.subclass(*args_, **kwargs_)
@@ -2426,6 +2727,7 @@ def factory(*args_, **kwargs_):
factory = staticmethod(factory)
def getValueOf_(self): return self.valueOf_
def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
+
def export(self, outfile, level, namespace_='', name_='definition', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
@@ -2437,33 +2739,40 @@ def export(self, outfile, level, namespace_='', name_='definition', namespacedef
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write(' />\n')
+
def exportAttributes(self, outfile, level, namespace_='', name_='definition'):
pass
+
def exportChildren(self, outfile, level, namespace_='', name_='definition'):
- if self.valueOf_.find('![CDATA')>-1:
- value=quote_xml('%s' % self.valueOf_)
- value=value.replace('![CDATA','<![CDATA')
- value=value.replace(']]',']]>')
+ if self.valueOf_.find('![CDATA') > -1:
+ value = quote_xml('%s' % self.valueOf_)
+ value = value.replace('![CDATA', '<![CDATA')
+ value = value.replace(']]', ']]>')
outfile.write(value)
else:
outfile.write(quote_xml('%s' % self.valueOf_))
+
def hasContent_(self):
if (
self.valueOf_ is not None
- ):
+ ):
return True
else:
return False
+
def exportLiteral(self, outfile, level, name_='definition'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
+
def exportLiteralAttributes(self, outfile, level, name_):
pass
+
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
+
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
@@ -2471,21 +2780,25 @@ def build(self, node_):
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
+
def buildAttributes(self, attrs):
pass
+
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.TEXT_NODE:
self.valueOf_ += child_.nodeValue
elif child_.nodeType == Node.CDATA_SECTION_NODE:
- self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
+ self.valueOf_ += '![CDATA[' + child_.nodeValue + ']]'
# end class definition
class argsstring(GeneratedsSuper):
subclass = None
superclass = None
+
def __init__(self, valueOf_=''):
self.valueOf_ = valueOf_
+
def factory(*args_, **kwargs_):
if argsstring.subclass:
return argsstring.subclass(*args_, **kwargs_)
@@ -2494,6 +2807,7 @@ def factory(*args_, **kwargs_):
factory = staticmethod(factory)
def getValueOf_(self): return self.valueOf_
def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
+
def export(self, outfile, level, namespace_='', name_='argsstring', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
@@ -2505,33 +2819,40 @@ def export(self, outfile, level, namespace_='', name_='argsstring', namespacedef
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write(' />\n')
+
def exportAttributes(self, outfile, level, namespace_='', name_='argsstring'):
pass
+
def exportChildren(self, outfile, level, namespace_='', name_='argsstring'):
- if self.valueOf_.find('![CDATA')>-1:
- value=quote_xml('%s' % self.valueOf_)
- value=value.replace('![CDATA','<![CDATA')
- value=value.replace(']]',']]>')
+ if self.valueOf_.find('![CDATA') > -1:
+ value = quote_xml('%s' % self.valueOf_)
+ value = value.replace('![CDATA', '<![CDATA')
+ value = value.replace(']]', ']]>')
outfile.write(value)
else:
outfile.write(quote_xml('%s' % self.valueOf_))
+
def hasContent_(self):
if (
self.valueOf_ is not None
- ):
+ ):
return True
else:
return False
+
def exportLiteral(self, outfile, level, name_='argsstring'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
+
def exportLiteralAttributes(self, outfile, level, name_):
pass
+
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
+
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
@@ -2539,21 +2860,25 @@ def build(self, node_):
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
+
def buildAttributes(self, attrs):
pass
+
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.TEXT_NODE:
self.valueOf_ += child_.nodeValue
elif child_.nodeType == Node.CDATA_SECTION_NODE:
- self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
+ self.valueOf_ += '![CDATA[' + child_.nodeValue + ']]'
# end class argsstring
class read(GeneratedsSuper):
subclass = None
superclass = None
+
def __init__(self, valueOf_=''):
self.valueOf_ = valueOf_
+
def factory(*args_, **kwargs_):
if read.subclass:
return read.subclass(*args_, **kwargs_)
@@ -2562,6 +2887,7 @@ def factory(*args_, **kwargs_):
factory = staticmethod(factory)
def getValueOf_(self): return self.valueOf_
def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
+
def export(self, outfile, level, namespace_='', name_='read', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
@@ -2573,33 +2899,40 @@ def export(self, outfile, level, namespace_='', name_='read', namespacedef_=''):
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write(' />\n')
+
def exportAttributes(self, outfile, level, namespace_='', name_='read'):
pass
+
def exportChildren(self, outfile, level, namespace_='', name_='read'):
- if self.valueOf_.find('![CDATA')>-1:
- value=quote_xml('%s' % self.valueOf_)
- value=value.replace('![CDATA','<![CDATA')
- value=value.replace(']]',']]>')
+ if self.valueOf_.find('![CDATA') > -1:
+ value = quote_xml('%s' % self.valueOf_)
+ value = value.replace('![CDATA', '<![CDATA')
+ value = value.replace(']]', ']]>')
outfile.write(value)
else:
outfile.write(quote_xml('%s' % self.valueOf_))
+
def hasContent_(self):
if (
self.valueOf_ is not None
- ):
+ ):
return True
else:
return False
+
def exportLiteral(self, outfile, level, name_='read'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
+
def exportLiteralAttributes(self, outfile, level, name_):
pass
+
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
+
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
@@ -2607,21 +2940,25 @@ def build(self, node_):
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
+
def buildAttributes(self, attrs):
pass
+
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.TEXT_NODE:
self.valueOf_ += child_.nodeValue
elif child_.nodeType == Node.CDATA_SECTION_NODE:
- self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
+ self.valueOf_ += '![CDATA[' + child_.nodeValue + ']]'
# end class read
class write(GeneratedsSuper):
subclass = None
superclass = None
+
def __init__(self, valueOf_=''):
self.valueOf_ = valueOf_
+
def factory(*args_, **kwargs_):
if write.subclass:
return write.subclass(*args_, **kwargs_)
@@ -2630,6 +2967,7 @@ def factory(*args_, **kwargs_):
factory = staticmethod(factory)
def getValueOf_(self): return self.valueOf_
def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
+
def export(self, outfile, level, namespace_='', name_='write', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
@@ -2641,33 +2979,40 @@ def export(self, outfile, level, namespace_='', name_='write', namespacedef_='')
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write(' />\n')
+
def exportAttributes(self, outfile, level, namespace_='', name_='write'):
pass
+
def exportChildren(self, outfile, level, namespace_='', name_='write'):
- if self.valueOf_.find('![CDATA')>-1:
- value=quote_xml('%s' % self.valueOf_)
- value=value.replace('![CDATA','<![CDATA')
- value=value.replace(']]',']]>')
+ if self.valueOf_.find('![CDATA') > -1:
+ value = quote_xml('%s' % self.valueOf_)
+ value = value.replace('![CDATA', '<![CDATA')
+ value = value.replace(']]', ']]>')
outfile.write(value)
else:
outfile.write(quote_xml('%s' % self.valueOf_))
+
def hasContent_(self):
if (
self.valueOf_ is not None
- ):
+ ):
return True
else:
return False
+
def exportLiteral(self, outfile, level, name_='write'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
+
def exportLiteralAttributes(self, outfile, level, name_):
pass
+
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
+
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
@@ -2675,21 +3020,25 @@ def build(self, node_):
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
+
def buildAttributes(self, attrs):
pass
+
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.TEXT_NODE:
self.valueOf_ += child_.nodeValue
elif child_.nodeType == Node.CDATA_SECTION_NODE:
- self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
+ self.valueOf_ += '![CDATA[' + child_.nodeValue + ']]'
# end class write
class bitfield(GeneratedsSuper):
subclass = None
superclass = None
+
def __init__(self, valueOf_=''):
self.valueOf_ = valueOf_
+
def factory(*args_, **kwargs_):
if bitfield.subclass:
return bitfield.subclass(*args_, **kwargs_)
@@ -2698,6 +3047,7 @@ def factory(*args_, **kwargs_):
factory = staticmethod(factory)
def getValueOf_(self): return self.valueOf_
def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
+
def export(self, outfile, level, namespace_='', name_='bitfield', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
@@ -2709,33 +3059,40 @@ def export(self, outfile, level, namespace_='', name_='bitfield', namespacedef_=
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write(' />\n')
+
def exportAttributes(self, outfile, level, namespace_='', name_='bitfield'):
pass
+
def exportChildren(self, outfile, level, namespace_='', name_='bitfield'):
- if self.valueOf_.find('![CDATA')>-1:
- value=quote_xml('%s' % self.valueOf_)
- value=value.replace('![CDATA','<![CDATA')
- value=value.replace(']]',']]>')
+ if self.valueOf_.find('![CDATA') > -1:
+ value = quote_xml('%s' % self.valueOf_)
+ value = value.replace('![CDATA', '<![CDATA')
+ value = value.replace(']]', ']]>')
outfile.write(value)
else:
outfile.write(quote_xml('%s' % self.valueOf_))
+
def hasContent_(self):
if (
self.valueOf_ is not None
- ):
+ ):
return True
else:
return False
+
def exportLiteral(self, outfile, level, name_='bitfield'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
+
def exportLiteralAttributes(self, outfile, level, name_):
pass
+
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
+
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
@@ -2743,19 +3100,22 @@ def build(self, node_):
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
+
def buildAttributes(self, attrs):
pass
+
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.TEXT_NODE:
self.valueOf_ += child_.nodeValue
elif child_.nodeType == Node.CDATA_SECTION_NODE:
- self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
+ self.valueOf_ += '![CDATA[' + child_.nodeValue + ']]'
# end class bitfield
class descriptionType(GeneratedsSuper):
subclass = None
superclass = None
+
def __init__(self, title=None, para=None, sect1=None, internal=None, mixedclass_=None, content_=None):
if mixedclass_ is None:
self.mixedclass_ = MixedContainer
@@ -2765,6 +3125,7 @@ def __init__(self, title=None, para=None, sect1=None, internal=None, mixedclass_
self.content_ = []
else:
self.content_ = content_
+
def factory(*args_, **kwargs_):
if descriptionType.subclass:
return descriptionType.subclass(*args_, **kwargs_)
@@ -2783,35 +3144,43 @@ def add_sect1(self, value): self.sect1.append(value)
def insert_sect1(self, index, value): self.sect1[index] = value
def get_internal(self): return self.internal
def set_internal(self, internal): self.internal = internal
+
def export(self, outfile, level, namespace_='', name_='descriptionType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
- self.exportAttributes(outfile, level, namespace_, name_='descriptionType')
+ self.exportAttributes(outfile, level, namespace_,
+ name_='descriptionType')
outfile.write('>')
self.exportChildren(outfile, level + 1, namespace_, name_)
outfile.write('</%s%s>\n' % (namespace_, name_))
+
def exportAttributes(self, outfile, level, namespace_='', name_='descriptionType'):
pass
+
def exportChildren(self, outfile, level, namespace_='', name_='descriptionType'):
for item_ in self.content_:
item_.export(outfile, level, item_.name, namespace_)
+
def hasContent_(self):
if (
self.title is not None or
self.para is not None or
self.sect1 is not None or
self.internal is not None
- ):
+ ):
return True
else:
return False
+
def exportLiteral(self, outfile, level, name_='descriptionType'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
+
def exportLiteralAttributes(self, outfile, level, name_):
pass
+
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('content_ = [\n')
@@ -2837,46 +3206,49 @@ def exportLiteralChildren(self, outfile, level, name_):
item_.exportLiteral(outfile, level, name_)
showIndent(outfile, level)
outfile.write('],\n')
+
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
+
def buildAttributes(self, attrs):
pass
+
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'title':
+ nodeName_ == 'title':
childobj_ = docTitleType.factory()
childobj_.build(child_)
obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
- MixedContainer.TypeNone, 'title', childobj_)
+ MixedContainer.TypeNone, 'title', childobj_)
self.content_.append(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'para':
+ nodeName_ == 'para':
childobj_ = docParaType.factory()
childobj_.build(child_)
obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
- MixedContainer.TypeNone, 'para', childobj_)
+ MixedContainer.TypeNone, 'para', childobj_)
self.content_.append(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'sect1':
+ nodeName_ == 'sect1':
childobj_ = docSect1Type.factory()
childobj_.build(child_)
obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
- MixedContainer.TypeNone, 'sect1', childobj_)
+ MixedContainer.TypeNone, 'sect1', childobj_)
self.content_.append(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'internal':
+ nodeName_ == 'internal':
childobj_ = docInternalType.factory()
childobj_.build(child_)
obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
- MixedContainer.TypeNone, 'internal', childobj_)
+ MixedContainer.TypeNone, 'internal', childobj_)
self.content_.append(obj_)
elif child_.nodeType == Node.TEXT_NODE:
obj_ = self.mixedclass_(MixedContainer.CategoryText,
- MixedContainer.TypeNone, '', child_.nodeValue)
+ MixedContainer.TypeNone, '', child_.nodeValue)
self.content_.append(obj_)
# end class descriptionType
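
A short usage sketch for the class above, assuming the whole generated module is already loaded (so descriptionType, MixedContainer and showIndent are all defined) and using an invented sample element; build() walks the DOM children and export() re-serialises them:

import sys
from xml.dom import minidom

dom_ = minidom.parseString('<description>Multiply the input by a constant.</description>')
desc_ = descriptionType.factory()
desc_.build(dom_.documentElement)                  # buildChildren() collects the text node into content_
desc_.export(sys.stdout, 0, name_='description')   # writes the element back out
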
@@ -2884,6 +3256,7 @@ def buildChildren(self, child_, nodeName_):
class enumvalueType(GeneratedsSuper):
subclass = None
superclass = None
+
def __init__(self, prot=None, id=None, name=None, initializer=None, briefdescription=None, detaileddescription=None, mixedclass_=None, content_=None):
self.prot = prot
self.id = id
@@ -2895,6 +3268,7 @@ def __init__(self, prot=None, id=None, name=None, initializer=None, briefdescrip
self.content_ = []
else:
self.content_ = content_
+
def factory(*args_, **kwargs_):
if enumvalueType.subclass:
return enumvalueType.subclass(*args_, **kwargs_)
@@ -2906,43 +3280,55 @@ def set_name(self, name): self.name = name
def get_initializer(self): return self.initializer
def set_initializer(self, initializer): self.initializer = initializer
def get_briefdescription(self): return self.briefdescription
- def set_briefdescription(self, briefdescription): self.briefdescription = briefdescription
+ def set_briefdescription(
+ self, briefdescription): self.briefdescription = briefdescription
+
def get_detaileddescription(self): return self.detaileddescription
- def set_detaileddescription(self, detaileddescription): self.detaileddescription = detaileddescription
+ def set_detaileddescription(
+ self, detaileddescription): self.detaileddescription = detaileddescription
+
def get_prot(self): return self.prot
def set_prot(self, prot): self.prot = prot
def get_id(self): return self.id
def set_id(self, id): self.id = id
+
def export(self, outfile, level, namespace_='', name_='enumvalueType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
- self.exportAttributes(outfile, level, namespace_, name_='enumvalueType')
+ self.exportAttributes(outfile, level, namespace_,
+ name_='enumvalueType')
outfile.write('>')
self.exportChildren(outfile, level + 1, namespace_, name_)
outfile.write('</%s%s>\n' % (namespace_, name_))
+
def exportAttributes(self, outfile, level, namespace_='', name_='enumvalueType'):
if self.prot is not None:
outfile.write(' prot=%s' % (quote_attrib(self.prot), ))
if self.id is not None:
- outfile.write(' id=%s' % (self.format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), ))
+ outfile.write(' id=%s' % (self.format_string(quote_attrib(
+ self.id).encode(ExternalEncoding), input_name='id'), ))
+
def exportChildren(self, outfile, level, namespace_='', name_='enumvalueType'):
for item_ in self.content_:
item_.export(outfile, level, item_.name, namespace_)
+
def hasContent_(self):
if (
self.name is not None or
self.initializer is not None or
self.briefdescription is not None or
self.detaileddescription is not None
- ):
+ ):
return True
else:
return False
+
def exportLiteral(self, outfile, level, name_='enumvalueType'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
+
def exportLiteralAttributes(self, outfile, level, name_):
if self.prot is not None:
showIndent(outfile, level)
@@ -2950,6 +3336,7 @@ def exportLiteralAttributes(self, outfile, level, name_):
if self.id is not None:
showIndent(outfile, level)
outfile.write('id = %s,\n' % (self.id,))
+
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('content_ = [\n')
@@ -2975,51 +3362,54 @@ def exportLiteralChildren(self, outfile, level, name_):
item_.exportLiteral(outfile, level, name_)
showIndent(outfile, level)
outfile.write('],\n')
+
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
+
def buildAttributes(self, attrs):
if attrs.get('prot'):
self.prot = attrs.get('prot').value
if attrs.get('id'):
self.id = attrs.get('id').value
+
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'name':
+ nodeName_ == 'name':
value_ = []
for text_ in child_.childNodes:
value_.append(text_.nodeValue)
valuestr_ = ''.join(value_)
obj_ = self.mixedclass_(MixedContainer.CategorySimple,
- MixedContainer.TypeString, 'name', valuestr_)
+ MixedContainer.TypeString, 'name', valuestr_)
self.content_.append(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'initializer':
+ nodeName_ == 'initializer':
childobj_ = linkedTextType.factory()
childobj_.build(child_)
obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
- MixedContainer.TypeNone, 'initializer', childobj_)
+ MixedContainer.TypeNone, 'initializer', childobj_)
self.content_.append(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'briefdescription':
+ nodeName_ == 'briefdescription':
childobj_ = descriptionType.factory()
childobj_.build(child_)
obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
- MixedContainer.TypeNone, 'briefdescription', childobj_)
+ MixedContainer.TypeNone, 'briefdescription', childobj_)
self.content_.append(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'detaileddescription':
+ nodeName_ == 'detaileddescription':
childobj_ = descriptionType.factory()
childobj_.build(child_)
obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
- MixedContainer.TypeNone, 'detaileddescription', childobj_)
+ MixedContainer.TypeNone, 'detaileddescription', childobj_)
self.content_.append(obj_)
elif child_.nodeType == Node.TEXT_NODE:
obj_ = self.mixedclass_(MixedContainer.CategoryText,
- MixedContainer.TypeNone, '', child_.nodeValue)
+ MixedContainer.TypeNone, '', child_.nodeValue)
self.content_.append(obj_)
# end class enumvalueType
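
Each generated class above exposes the same subclass/factory hook: assigning a wrapper class to the generated class's subclass attribute makes factory(), and therefore every buildChildren() path that calls it, return the wrapper instead of the plain generated type. A minimal sketch using enumvalueType, with the wrapper name invented:

class EnumValueWrapper(enumvalueType):
    # Hypothetical subclass; real code would add convenience accessors here.
    pass

enumvalueType.subclass = EnumValueWrapper
# factory() now hands back the wrapper, so trees built from XML carry the extra behaviour.
assert isinstance(enumvalueType.factory(), EnumValueWrapper)
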
@@ -3027,11 +3417,13 @@ def buildChildren(self, child_, nodeName_):
class templateparamlistType(GeneratedsSuper):
subclass = None
superclass = None
+
def __init__(self, param=None):
if param is None:
self.param = []
else:
self.param = param
+
def factory(*args_, **kwargs_):
if templateparamlistType.subclass:
return templateparamlistType.subclass(*args_, **kwargs_)
@@ -3042,10 +3434,12 @@ def get_param(self): return self.param
def set_param(self, param): self.param = param
def add_param(self, value): self.param.append(value)
def insert_param(self, index, value): self.param[index] = value
+
def export(self, outfile, level, namespace_='', name_='templateparamlistType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
- self.exportAttributes(outfile, level, namespace_, name_='templateparamlistType')
+ self.exportAttributes(outfile, level, namespace_,
+ name_='templateparamlistType')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
@@ -3053,25 +3447,31 @@ def export(self, outfile, level, namespace_='', name_='templateparamlistType', n
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write(' />\n')
+
def exportAttributes(self, outfile, level, namespace_='', name_='templateparamlistType'):
pass
+
def exportChildren(self, outfile, level, namespace_='', name_='templateparamlistType'):
for param_ in self.param:
param_.export(outfile, level, namespace_, name_='param')
+
def hasContent_(self):
if (
self.param is not None
- ):
+ ):
return True
else:
return False
+
def exportLiteral(self, outfile, level, name_='templateparamlistType'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
+
def exportLiteralAttributes(self, outfile, level, name_):
pass
+
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('param=[\n')
@@ -3085,17 +3485,20 @@ def exportLiteralChildren(self, outfile, level, name_):
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
+
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
+
def buildAttributes(self, attrs):
pass
+
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'param':
+ nodeName_ == 'param':
obj_ = paramType.factory()
obj_.build(child_)
self.param.append(obj_)
@@ -3105,6 +3508,7 @@ def buildChildren(self, child_, nodeName_):
class paramType(GeneratedsSuper):
subclass = None
superclass = None
+
def __init__(self, type_=None, declname=None, defname=None, array=None, defval=None, briefdescription=None):
self.type_ = type_
self.declname = declname
@@ -3112,6 +3516,7 @@ def __init__(self, type_=None, declname=None, defname=None, array=None, defval=N
self.array = array
self.defval = defval
self.briefdescription = briefdescription
+
def factory(*args_, **kwargs_):
if paramType.subclass:
return paramType.subclass(*args_, **kwargs_)
@@ -3129,7 +3534,10 @@ def set_array(self, array): self.array = array
def get_defval(self): return self.defval
def set_defval(self, defval): self.defval = defval
def get_briefdescription(self): return self.briefdescription
- def set_briefdescription(self, briefdescription): self.briefdescription = briefdescription
+
+ def set_briefdescription(
+ self, briefdescription): self.briefdescription = briefdescription
+
def export(self, outfile, level, namespace_='', name_='paramType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
@@ -3141,24 +3549,31 @@ def export(self, outfile, level, namespace_='', name_='paramType', namespacedef_
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write(' />\n')
+
def exportAttributes(self, outfile, level, namespace_='', name_='paramType'):
pass
+
def exportChildren(self, outfile, level, namespace_='', name_='paramType'):
if self.type_:
self.type_.export(outfile, level, namespace_, name_='type')
if self.declname is not None:
showIndent(outfile, level)
- outfile.write('<%sdeclname>%s</%sdeclname>\n' % (namespace_, self.format_string(quote_xml(self.declname).encode(ExternalEncoding), input_name='declname'), namespace_))
+ outfile.write('<%sdeclname>%s</%sdeclname>\n' % (namespace_, self.format_string(
+ quote_xml(self.declname).encode(ExternalEncoding), input_name='declname'), namespace_))
if self.defname is not None:
showIndent(outfile, level)
- outfile.write('<%sdefname>%s</%sdefname>\n' % (namespace_, self.format_string(quote_xml(self.defname).encode(ExternalEncoding), input_name='defname'), namespace_))
+ outfile.write('<%sdefname>%s</%sdefname>\n' % (namespace_, self.format_string(
+ quote_xml(self.defname).encode(ExternalEncoding), input_name='defname'), namespace_))
if self.array is not None:
showIndent(outfile, level)
- outfile.write('<%sarray>%s</%sarray>\n' % (namespace_, self.format_string(quote_xml(self.array).encode(ExternalEncoding), input_name='array'), namespace_))
+ outfile.write('<%sarray>%s</%sarray>\n' % (namespace_, self.format_string(
+ quote_xml(self.array).encode(ExternalEncoding), input_name='array'), namespace_))
if self.defval:
self.defval.export(outfile, level, namespace_, name_='defval')
if self.briefdescription:
- self.briefdescription.export(outfile, level, namespace_, name_='briefdescription')
+ self.briefdescription.export(
+ outfile, level, namespace_, name_='briefdescription')
+
def hasContent_(self):
if (
self.type_ is not None or
@@ -3167,17 +3582,20 @@ def hasContent_(self):
self.array is not None or
self.defval is not None or
self.briefdescription is not None
- ):
+ ):
return True
else:
return False
+
def exportLiteral(self, outfile, level, name_='paramType'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
+
def exportLiteralAttributes(self, outfile, level, name_):
pass
+
def exportLiteralChildren(self, outfile, level, name_):
if self.type_:
showIndent(outfile, level)
@@ -3186,11 +3604,14 @@ def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('),\n')
showIndent(outfile, level)
- outfile.write('declname=%s,\n' % quote_python(self.declname).encode(ExternalEncoding))
+ outfile.write('declname=%s,\n' % quote_python(
+ self.declname).encode(ExternalEncoding))
showIndent(outfile, level)
- outfile.write('defname=%s,\n' % quote_python(self.defname).encode(ExternalEncoding))
+ outfile.write('defname=%s,\n' % quote_python(
+ self.defname).encode(ExternalEncoding))
showIndent(outfile, level)
- outfile.write('array=%s,\n' % quote_python(self.array).encode(ExternalEncoding))
+ outfile.write('array=%s,\n' % quote_python(
+ self.array).encode(ExternalEncoding))
if self.defval:
showIndent(outfile, level)
outfile.write('defval=model_.linkedTextType(\n')
@@ -3200,48 +3621,52 @@ def exportLiteralChildren(self, outfile, level, name_):
if self.briefdescription:
showIndent(outfile, level)
outfile.write('briefdescription=model_.descriptionType(\n')
- self.briefdescription.exportLiteral(outfile, level, name_='briefdescription')
+ self.briefdescription.exportLiteral(
+ outfile, level, name_='briefdescription')
showIndent(outfile, level)
outfile.write('),\n')
+
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
+
def buildAttributes(self, attrs):
pass
+
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'type':
+ nodeName_ == 'type':
obj_ = linkedTextType.factory()
obj_.build(child_)
self.set_type(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'declname':
+ nodeName_ == 'declname':
declname_ = ''
for text__content_ in child_.childNodes:
declname_ += text__content_.nodeValue
self.declname = declname_
elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'defname':
+ nodeName_ == 'defname':
defname_ = ''
for text__content_ in child_.childNodes:
defname_ += text__content_.nodeValue
self.defname = defname_
elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'array':
+ nodeName_ == 'array':
array_ = ''
for text__content_ in child_.childNodes:
array_ += text__content_.nodeValue
self.array = array_
elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'defval':
+ nodeName_ == 'defval':
obj_ = linkedTextType.factory()
obj_.build(child_)
self.set_defval(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'briefdescription':
+ nodeName_ == 'briefdescription':
obj_ = descriptionType.factory()
obj_.build(child_)
self.set_briefdescription(obj_)
@@ -3251,8 +3676,10 @@ def buildChildren(self, child_, nodeName_):
class declname(GeneratedsSuper):
subclass = None
superclass = None
+
def __init__(self, valueOf_=''):
self.valueOf_ = valueOf_
+
def factory(*args_, **kwargs_):
if declname.subclass:
return declname.subclass(*args_, **kwargs_)
@@ -3261,6 +3688,7 @@ def factory(*args_, **kwargs_):
factory = staticmethod(factory)
def getValueOf_(self): return self.valueOf_
def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
+
def export(self, outfile, level, namespace_='', name_='declname', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
@@ -3272,33 +3700,40 @@ def export(self, outfile, level, namespace_='', name_='declname', namespacedef_=
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write(' />\n')
+
def exportAttributes(self, outfile, level, namespace_='', name_='declname'):
pass
+
def exportChildren(self, outfile, level, namespace_='', name_='declname'):
- if self.valueOf_.find('![CDATA')>-1:
- value=quote_xml('%s' % self.valueOf_)
- value=value.replace('![CDATA','<![CDATA')
+ if self.valueOf_.find('![CDATA') > -1:
+ value = quote_xml('%s' % self.valueOf_)
+ value = value.replace('![CDATA', '<![CDATA')
outfile.write(value)
else:
outfile.write(quote_xml('%s' % self.valueOf_))
+
def hasContent_(self):
if (
self.valueOf_ is not None
- ):
+ ):
return True
else:
return False
+
def exportLiteral(self, outfile, level, name_='declname'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
+
def exportLiteralAttributes(self, outfile, level, name_):
pass
+
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
+
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
@@ -3306,21 +3741,25 @@ def build(self, node_):
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
+
def buildAttributes(self, attrs):
pass
+
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.TEXT_NODE:
self.valueOf_ += child_.nodeValue
elif child_.nodeType == Node.CDATA_SECTION_NODE:
- self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
+ self.valueOf_ += '![CDATA[' + child_.nodeValue + ']]'
# end class declname
class defname(GeneratedsSuper):
subclass = None
superclass = None
+
def __init__(self, valueOf_=''):
self.valueOf_ = valueOf_
+
def factory(*args_, **kwargs_):
if defname.subclass:
return defname.subclass(*args_, **kwargs_)
@@ -3329,6 +3768,7 @@ def factory(*args_, **kwargs_):
factory = staticmethod(factory)
def getValueOf_(self): return self.valueOf_
def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
+
def export(self, outfile, level, namespace_='', name_='defname', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
@@ -3340,33 +3780,40 @@ def export(self, outfile, level, namespace_='', name_='defname', namespacedef_='
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write(' />\n')
+
def exportAttributes(self, outfile, level, namespace_='', name_='defname'):
pass
+
def exportChildren(self, outfile, level, namespace_='', name_='defname'):
- if self.valueOf_.find('![CDATA')>-1:
- value=quote_xml('%s' % self.valueOf_)
- value=value.replace('![CDATA','<![CDATA')
+ if self.valueOf_.find('![CDATA') > -1:
+ value = quote_xml('%s' % self.valueOf_)
+ value = value.replace('![CDATA', '<![CDATA')
outfile.write(value)
else:
outfile.write(quote_xml('%s' % self.valueOf_))
+
def hasContent_(self):
if (
self.valueOf_ is not None
- ):
+ ):
return True
else:
return False
+
def exportLiteral(self, outfile, level, name_='defname'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
+
def exportLiteralAttributes(self, outfile, level, name_):
pass
+
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
+
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
@@ -3374,21 +3821,25 @@ def build(self, node_):
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
+
def buildAttributes(self, attrs):
pass
+
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.TEXT_NODE:
self.valueOf_ += child_.nodeValue
elif child_.nodeType == Node.CDATA_SECTION_NODE:
- self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
+ self.valueOf_ += '![CDATA[' + child_.nodeValue + ']]'
# end class defname
class array(GeneratedsSuper):
subclass = None
superclass = None
+
def __init__(self, valueOf_=''):
self.valueOf_ = valueOf_
+
def factory(*args_, **kwargs_):
if array.subclass:
return array.subclass(*args_, **kwargs_)
@@ -3397,6 +3848,7 @@ def factory(*args_, **kwargs_):
factory = staticmethod(factory)
def getValueOf_(self): return self.valueOf_
def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
+
def export(self, outfile, level, namespace_='', name_='array', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
@@ -3408,33 +3860,40 @@ def export(self, outfile, level, namespace_='', name_='array', namespacedef_='')
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write(' />\n')
+
def exportAttributes(self, outfile, level, namespace_='', name_='array'):
pass
+
def exportChildren(self, outfile, level, namespace_='', name_='array'):
- if self.valueOf_.find('![CDATA')>-1:
- value=quote_xml('%s' % self.valueOf_)
- value=value.replace('![CDATA','<![CDATA')
+ if self.valueOf_.find('![CDATA') > -1:
+ value = quote_xml('%s' % self.valueOf_)
+ value = value.replace('![CDATA', '<![CDATA')
outfile.write(value)
else:
outfile.write(quote_xml('%s' % self.valueOf_))
+
def hasContent_(self):
if (
self.valueOf_ is not None
- ):
+ ):
return True
else:
return False
+
def exportLiteral(self, outfile, level, name_='array'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
+
def exportLiteralAttributes(self, outfile, level, name_):
pass
+
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
+
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
@@ -3442,19 +3901,22 @@ def build(self, node_):
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
+
def buildAttributes(self, attrs):
pass
+
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.TEXT_NODE:
self.valueOf_ += child_.nodeValue
elif child_.nodeType == Node.CDATA_SECTION_NODE:
- self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
+ self.valueOf_ += '![CDATA[' + child_.nodeValue + ']]'
# end class array
class linkedTextType(GeneratedsSuper):
subclass = None
superclass = None
+
def __init__(self, ref=None, mixedclass_=None, content_=None):
if mixedclass_ is None:
self.mixedclass_ = MixedContainer
@@ -3464,6 +3926,7 @@ def __init__(self, ref=None, mixedclass_=None, content_=None):
self.content_ = []
else:
self.content_ = content_
+
def factory(*args_, **kwargs_):
if linkedTextType.subclass:
return linkedTextType.subclass(*args_, **kwargs_)
@@ -3474,32 +3937,40 @@ def get_ref(self): return self.ref
def set_ref(self, ref): self.ref = ref
def add_ref(self, value): self.ref.append(value)
def insert_ref(self, index, value): self.ref[index] = value
+
def export(self, outfile, level, namespace_='', name_='linkedTextType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
- self.exportAttributes(outfile, level, namespace_, name_='linkedTextType')
+ self.exportAttributes(outfile, level, namespace_,
+ name_='linkedTextType')
outfile.write('>')
self.exportChildren(outfile, level + 1, namespace_, name_)
outfile.write('</%s%s>\n' % (namespace_, name_))
+
def exportAttributes(self, outfile, level, namespace_='', name_='linkedTextType'):
pass
+
def exportChildren(self, outfile, level, namespace_='', name_='linkedTextType'):
for item_ in self.content_:
item_.export(outfile, level, item_.name, namespace_)
+
def hasContent_(self):
if (
self.ref is not None
- ):
+ ):
return True
else:
return False
+
def exportLiteral(self, outfile, level, name_='linkedTextType'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
+
def exportLiteralAttributes(self, outfile, level, name_):
pass
+
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('content_ = [\n')
@@ -3507,25 +3978,28 @@ def exportLiteralChildren(self, outfile, level, name_):
item_.exportLiteral(outfile, level, name_)
showIndent(outfile, level)
outfile.write('],\n')
+
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
+
def buildAttributes(self, attrs):
pass
+
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'ref':
+ nodeName_ == 'ref':
childobj_ = docRefTextType.factory()
childobj_.build(child_)
obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
- MixedContainer.TypeNone, 'ref', childobj_)
+ MixedContainer.TypeNone, 'ref', childobj_)
self.content_.append(obj_)
elif child_.nodeType == Node.TEXT_NODE:
obj_ = self.mixedclass_(MixedContainer.CategoryText,
- MixedContainer.TypeNone, '', child_.nodeValue)
+ MixedContainer.TypeNone, '', child_.nodeValue)
self.content_.append(obj_)
# end class linkedTextType
@@ -3533,11 +4007,13 @@ def buildChildren(self, child_, nodeName_):
class graphType(GeneratedsSuper):
subclass = None
superclass = None
+
def __init__(self, node=None):
if node is None:
self.node = []
else:
self.node = node
+
def factory(*args_, **kwargs_):
if graphType.subclass:
return graphType.subclass(*args_, **kwargs_)
@@ -3548,6 +4024,7 @@ def get_node(self): return self.node
def set_node(self, node): self.node = node
def add_node(self, value): self.node.append(value)
def insert_node(self, index, value): self.node[index] = value
+
def export(self, outfile, level, namespace_='', name_='graphType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
@@ -3559,25 +4036,31 @@ def export(self, outfile, level, namespace_='', name_='graphType', namespacedef_
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write(' />\n')
+
def exportAttributes(self, outfile, level, namespace_='', name_='graphType'):
pass
+
def exportChildren(self, outfile, level, namespace_='', name_='graphType'):
for node_ in self.node:
node_.export(outfile, level, namespace_, name_='node')
+
def hasContent_(self):
if (
self.node is not None
- ):
+ ):
return True
else:
return False
+
def exportLiteral(self, outfile, level, name_='graphType'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
+
def exportLiteralAttributes(self, outfile, level, name_):
pass
+
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('node=[\n')
@@ -3591,17 +4074,20 @@ def exportLiteralChildren(self, outfile, level, name_):
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
+
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
+
def buildAttributes(self, attrs):
pass
+
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'node':
+ nodeName_ == 'node':
obj_ = nodeType.factory()
obj_.build(child_)
self.node.append(obj_)
@@ -3611,6 +4097,7 @@ def buildChildren(self, child_, nodeName_):
class nodeType(GeneratedsSuper):
subclass = None
superclass = None
+
def __init__(self, id=None, label=None, link=None, childnode=None):
self.id = id
self.label = label
@@ -3619,6 +4106,7 @@ def __init__(self, id=None, label=None, link=None, childnode=None):
self.childnode = []
else:
self.childnode = childnode
+
def factory(*args_, **kwargs_):
if nodeType.subclass:
return nodeType.subclass(*args_, **kwargs_)
@@ -3635,6 +4123,7 @@ def add_childnode(self, value): self.childnode.append(value)
def insert_childnode(self, index, value): self.childnode[index] = value
def get_id(self): return self.id
def set_id(self, id): self.id = id
+
def export(self, outfile, level, namespace_='', name_='nodeType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
@@ -3646,38 +4135,47 @@ def export(self, outfile, level, namespace_='', name_='nodeType', namespacedef_=
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write(' />\n')
+
def exportAttributes(self, outfile, level, namespace_='', name_='nodeType'):
if self.id is not None:
- outfile.write(' id=%s' % (self.format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), ))
+ outfile.write(' id=%s' % (self.format_string(quote_attrib(
+ self.id).encode(ExternalEncoding), input_name='id'), ))
+
def exportChildren(self, outfile, level, namespace_='', name_='nodeType'):
if self.label is not None:
showIndent(outfile, level)
- outfile.write('<%slabel>%s</%slabel>\n' % (namespace_, self.format_string(quote_xml(self.label).encode(ExternalEncoding), input_name='label'), namespace_))
+ outfile.write('<%slabel>%s</%slabel>\n' % (namespace_, self.format_string(
+ quote_xml(self.label).encode(ExternalEncoding), input_name='label'), namespace_))
if self.link:
self.link.export(outfile, level, namespace_, name_='link')
for childnode_ in self.childnode:
childnode_.export(outfile, level, namespace_, name_='childnode')
+
def hasContent_(self):
if (
self.label is not None or
self.link is not None or
self.childnode is not None
- ):
+ ):
return True
else:
return False
+
def exportLiteral(self, outfile, level, name_='nodeType'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
+
def exportLiteralAttributes(self, outfile, level, name_):
if self.id is not None:
showIndent(outfile, level)
outfile.write('id = %s,\n' % (self.id,))
+
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
- outfile.write('label=%s,\n' % quote_python(self.label).encode(ExternalEncoding))
+ outfile.write('label=%s,\n' % quote_python(
+ self.label).encode(ExternalEncoding))
if self.link:
showIndent(outfile, level)
outfile.write('link=model_.linkType(\n')
@@ -3696,29 +4194,32 @@ def exportLiteralChildren(self, outfile, level, name_):
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
+
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
+
def buildAttributes(self, attrs):
if attrs.get('id'):
self.id = attrs.get('id').value
+
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'label':
+ nodeName_ == 'label':
label_ = ''
for text__content_ in child_.childNodes:
label_ += text__content_.nodeValue
self.label = label_
elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'link':
+ nodeName_ == 'link':
obj_ = linkType.factory()
obj_.build(child_)
self.set_link(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'childnode':
+ nodeName_ == 'childnode':
obj_ = childnodeType.factory()
obj_.build(child_)
self.childnode.append(obj_)
@@ -3728,8 +4229,10 @@ def buildChildren(self, child_, nodeName_):
class label(GeneratedsSuper):
subclass = None
superclass = None
+
def __init__(self, valueOf_=''):
self.valueOf_ = valueOf_
+
def factory(*args_, **kwargs_):
if label.subclass:
return label.subclass(*args_, **kwargs_)
@@ -3738,6 +4241,7 @@ def factory(*args_, **kwargs_):
factory = staticmethod(factory)
def getValueOf_(self): return self.valueOf_
def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
+
def export(self, outfile, level, namespace_='', name_='label', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
@@ -3749,33 +4253,40 @@ def export(self, outfile, level, namespace_='', name_='label', namespacedef_='')
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write(' />\n')
+
def exportAttributes(self, outfile, level, namespace_='', name_='label'):
pass
+
def exportChildren(self, outfile, level, namespace_='', name_='label'):
- if self.valueOf_.find('![CDATA')>-1:
- value=quote_xml('%s' % self.valueOf_)
- value=value.replace('![CDATA','<![CDATA')
+ if self.valueOf_.find('![CDATA') > -1:
+ value = quote_xml('%s' % self.valueOf_)
+ value = value.replace('![CDATA', '<![CDATA')
outfile.write(value)
else:
outfile.write(quote_xml('%s' % self.valueOf_))
+
def hasContent_(self):
if (
self.valueOf_ is not None
- ):
+ ):
return True
else:
return False
+
def exportLiteral(self, outfile, level, name_='label'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
+
def exportLiteralAttributes(self, outfile, level, name_):
pass
+
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
+
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
@@ -3783,19 +4294,22 @@ def build(self, node_):
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
+
def buildAttributes(self, attrs):
pass
+
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.TEXT_NODE:
self.valueOf_ += child_.nodeValue
elif child_.nodeType == Node.CDATA_SECTION_NODE:
- self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
+ self.valueOf_ += '![CDATA[' + child_.nodeValue + ']]'
# end class label
class childnodeType(GeneratedsSuper):
subclass = None
superclass = None
+
def __init__(self, relation=None, refid=None, edgelabel=None):
self.relation = relation
self.refid = refid
@@ -3803,6 +4317,7 @@ def __init__(self, relation=None, refid=None, edgelabel=None):
self.edgelabel = []
else:
self.edgelabel = edgelabel
+
def factory(*args_, **kwargs_):
if childnodeType.subclass:
return childnodeType.subclass(*args_, **kwargs_)
@@ -3817,10 +4332,12 @@ def get_relation(self): return self.relation
def set_relation(self, relation): self.relation = relation
def get_refid(self): return self.refid
def set_refid(self, refid): self.refid = refid
+
def export(self, outfile, level, namespace_='', name_='childnodeType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
- self.exportAttributes(outfile, level, namespace_, name_='childnodeType')
+ self.exportAttributes(outfile, level, namespace_,
+ name_='childnodeType')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
@@ -3828,27 +4345,34 @@ def export(self, outfile, level, namespace_='', name_='childnodeType', namespace
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write(' />\n')
+
def exportAttributes(self, outfile, level, namespace_='', name_='childnodeType'):
if self.relation is not None:
outfile.write(' relation=%s' % (quote_attrib(self.relation), ))
if self.refid is not None:
- outfile.write(' refid=%s' % (self.format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), ))
+ outfile.write(' refid=%s' % (self.format_string(quote_attrib(
+ self.refid).encode(ExternalEncoding), input_name='refid'), ))
+
def exportChildren(self, outfile, level, namespace_='', name_='childnodeType'):
for edgelabel_ in self.edgelabel:
showIndent(outfile, level)
- outfile.write('<%sedgelabel>%s</%sedgelabel>\n' % (namespace_, self.format_string(quote_xml(edgelabel_).encode(ExternalEncoding), input_name='edgelabel'), namespace_))
+ outfile.write('<%sedgelabel>%s</%sedgelabel>\n' % (namespace_, self.format_string(
+ quote_xml(edgelabel_).encode(ExternalEncoding), input_name='edgelabel'), namespace_))
+
def hasContent_(self):
if (
self.edgelabel is not None
- ):
+ ):
return True
else:
return False
+
def exportLiteral(self, outfile, level, name_='childnodeType'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
+
def exportLiteralAttributes(self, outfile, level, name_):
if self.relation is not None:
showIndent(outfile, level)
@@ -3856,30 +4380,35 @@ def exportLiteralAttributes(self, outfile, level, name_):
if self.refid is not None:
showIndent(outfile, level)
outfile.write('refid = %s,\n' % (self.refid,))
+
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('edgelabel=[\n')
level += 1
for edgelabel in self.edgelabel:
showIndent(outfile, level)
- outfile.write('%s,\n' % quote_python(edgelabel).encode(ExternalEncoding))
+ outfile.write('%s,\n' % quote_python(
+ edgelabel).encode(ExternalEncoding))
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
+
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
+
def buildAttributes(self, attrs):
if attrs.get('relation'):
self.relation = attrs.get('relation').value
if attrs.get('refid'):
self.refid = attrs.get('refid').value
+
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'edgelabel':
+ nodeName_ == 'edgelabel':
edgelabel_ = ''
for text__content_ in child_.childNodes:
edgelabel_ += text__content_.nodeValue
@@ -3890,8 +4419,10 @@ def buildChildren(self, child_, nodeName_):
class edgelabel(GeneratedsSuper):
subclass = None
superclass = None
+
def __init__(self, valueOf_=''):
self.valueOf_ = valueOf_
+
def factory(*args_, **kwargs_):
if edgelabel.subclass:
return edgelabel.subclass(*args_, **kwargs_)
@@ -3900,6 +4431,7 @@ def factory(*args_, **kwargs_):
factory = staticmethod(factory)
def getValueOf_(self): return self.valueOf_
def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
+
def export(self, outfile, level, namespace_='', name_='edgelabel', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
@@ -3911,33 +4443,40 @@ def export(self, outfile, level, namespace_='', name_='edgelabel', namespacedef_
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write(' />\n')
+
def exportAttributes(self, outfile, level, namespace_='', name_='edgelabel'):
pass
+
def exportChildren(self, outfile, level, namespace_='', name_='edgelabel'):
- if self.valueOf_.find('![CDATA')>-1:
- value=quote_xml('%s' % self.valueOf_)
- value=value.replace('![CDATA','<![CDATA')
+ if self.valueOf_.find('![CDATA') > -1:
+ value = quote_xml('%s' % self.valueOf_)
+ value = value.replace('![CDATA', '<![CDATA')
outfile.write(value)
else:
outfile.write(quote_xml('%s' % self.valueOf_))
+
def hasContent_(self):
if (
self.valueOf_ is not None
- ):
+ ):
return True
else:
return False
+
def exportLiteral(self, outfile, level, name_='edgelabel'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
+
def exportLiteralAttributes(self, outfile, level, name_):
pass
+
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
+
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
@@ -3945,23 +4484,27 @@ def build(self, node_):
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
+
def buildAttributes(self, attrs):
pass
+
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.TEXT_NODE:
self.valueOf_ += child_.nodeValue
elif child_.nodeType == Node.CDATA_SECTION_NODE:
- self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
+ self.valueOf_ += '![CDATA[' + child_.nodeValue + ']]'
# end class edgelabel
class linkType(GeneratedsSuper):
subclass = None
superclass = None
+
def __init__(self, refid=None, external=None, valueOf_=''):
self.refid = refid
self.external = external
self.valueOf_ = valueOf_
+
def factory(*args_, **kwargs_):
if linkType.subclass:
return linkType.subclass(*args_, **kwargs_)
@@ -3974,6 +4517,7 @@ def get_external(self): return self.external
def set_external(self, external): self.external = external
def getValueOf_(self): return self.valueOf_
def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
+
def export(self, outfile, level, namespace_='', name_='linkType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
@@ -3985,31 +4529,38 @@ def export(self, outfile, level, namespace_='', name_='linkType', namespacedef_=
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write(' />\n')
+
def exportAttributes(self, outfile, level, namespace_='', name_='linkType'):
if self.refid is not None:
- outfile.write(' refid=%s' % (self.format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), ))
+ outfile.write(' refid=%s' % (self.format_string(quote_attrib(
+ self.refid).encode(ExternalEncoding), input_name='refid'), ))
if self.external is not None:
- outfile.write(' external=%s' % (self.format_string(quote_attrib(self.external).encode(ExternalEncoding), input_name='external'), ))
+ outfile.write(' external=%s' % (self.format_string(quote_attrib(
+ self.external).encode(ExternalEncoding), input_name='external'), ))
+
def exportChildren(self, outfile, level, namespace_='', name_='linkType'):
- if self.valueOf_.find('![CDATA')>-1:
- value=quote_xml('%s' % self.valueOf_)
- value=value.replace('![CDATA','<![CDATA')
+ if self.valueOf_.find('![CDATA') > -1:
+ value = quote_xml('%s' % self.valueOf_)
+ value = value.replace('![CDATA', '<![CDATA')
outfile.write(value)
else:
outfile.write(quote_xml('%s' % self.valueOf_))
+
def hasContent_(self):
if (
self.valueOf_ is not None
- ):
+ ):
return True
else:
return False
+
def exportLiteral(self, outfile, level, name_='linkType'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
+
def exportLiteralAttributes(self, outfile, level, name_):
if self.refid is not None:
showIndent(outfile, level)
@@ -4017,9 +4568,11 @@ def exportLiteralAttributes(self, outfile, level, name_):
if self.external is not None:
showIndent(outfile, level)
outfile.write('external = %s,\n' % (self.external,))
+
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
+
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
@@ -4027,27 +4580,31 @@ def build(self, node_):
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
+
def buildAttributes(self, attrs):
if attrs.get('refid'):
self.refid = attrs.get('refid').value
if attrs.get('external'):
self.external = attrs.get('external').value
+
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.TEXT_NODE:
self.valueOf_ += child_.nodeValue
elif child_.nodeType == Node.CDATA_SECTION_NODE:
- self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
+ self.valueOf_ += '![CDATA[' + child_.nodeValue + ']]'
# end class linkType
class listingType(GeneratedsSuper):
subclass = None
superclass = None
+
def __init__(self, codeline=None):
if codeline is None:
self.codeline = []
else:
self.codeline = codeline
+
def factory(*args_, **kwargs_):
if listingType.subclass:
return listingType.subclass(*args_, **kwargs_)
@@ -4058,6 +4615,7 @@ def get_codeline(self): return self.codeline
def set_codeline(self, codeline): self.codeline = codeline
def add_codeline(self, value): self.codeline.append(value)
def insert_codeline(self, index, value): self.codeline[index] = value
+
def export(self, outfile, level, namespace_='', name_='listingType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
@@ -4069,25 +4627,31 @@ def export(self, outfile, level, namespace_='', name_='listingType', namespacede
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write(' />\n')
+
def exportAttributes(self, outfile, level, namespace_='', name_='listingType'):
pass
+
def exportChildren(self, outfile, level, namespace_='', name_='listingType'):
for codeline_ in self.codeline:
codeline_.export(outfile, level, namespace_, name_='codeline')
+
def hasContent_(self):
if (
self.codeline is not None
- ):
+ ):
return True
else:
return False
+
def exportLiteral(self, outfile, level, name_='listingType'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
+
def exportLiteralAttributes(self, outfile, level, name_):
pass
+
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('codeline=[\n')
@@ -4101,17 +4665,20 @@ def exportLiteralChildren(self, outfile, level, name_):
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
+
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
+
def buildAttributes(self, attrs):
pass
+
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'codeline':
+ nodeName_ == 'codeline':
obj_ = codelineType.factory()
obj_.build(child_)
self.codeline.append(obj_)
@@ -4121,6 +4688,7 @@ def buildChildren(self, child_, nodeName_):
class codelineType(GeneratedsSuper):
subclass = None
superclass = None
+
def __init__(self, external=None, lineno=None, refkind=None, refid=None, highlight=None):
self.external = external
self.lineno = lineno
@@ -4130,6 +4698,7 @@ def __init__(self, external=None, lineno=None, refkind=None, refid=None, highlig
self.highlight = []
else:
self.highlight = highlight
+
def factory(*args_, **kwargs_):
if codelineType.subclass:
return codelineType.subclass(*args_, **kwargs_)
@@ -4148,6 +4717,7 @@ def get_refkind(self): return self.refkind
def set_refkind(self, refkind): self.refkind = refkind
def get_refid(self): return self.refid
def set_refid(self, refid): self.refid = refid
+
def export(self, outfile, level, namespace_='', name_='codelineType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
@@ -4159,30 +4729,37 @@ def export(self, outfile, level, namespace_='', name_='codelineType', namespaced
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write(' />\n')
+
def exportAttributes(self, outfile, level, namespace_='', name_='codelineType'):
if self.external is not None:
outfile.write(' external=%s' % (quote_attrib(self.external), ))
if self.lineno is not None:
- outfile.write(' lineno="%s"' % self.format_integer(self.lineno, input_name='lineno'))
+ outfile.write(' lineno="%s"' % self.format_integer(
+ self.lineno, input_name='lineno'))
if self.refkind is not None:
outfile.write(' refkind=%s' % (quote_attrib(self.refkind), ))
if self.refid is not None:
- outfile.write(' refid=%s' % (self.format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), ))
+ outfile.write(' refid=%s' % (self.format_string(quote_attrib(
+ self.refid).encode(ExternalEncoding), input_name='refid'), ))
+
def exportChildren(self, outfile, level, namespace_='', name_='codelineType'):
for highlight_ in self.highlight:
highlight_.export(outfile, level, namespace_, name_='highlight')
+
def hasContent_(self):
if (
self.highlight is not None
- ):
+ ):
return True
else:
return False
+
def exportLiteral(self, outfile, level, name_='codelineType'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
+
def exportLiteralAttributes(self, outfile, level, name_):
if self.external is not None:
showIndent(outfile, level)
@@ -4196,6 +4773,7 @@ def exportLiteralAttributes(self, outfile, level, name_):
if self.refid is not None:
showIndent(outfile, level)
outfile.write('refid = %s,\n' % (self.refid,))
+
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('highlight=[\n')
@@ -4209,27 +4787,30 @@ def exportLiteralChildren(self, outfile, level, name_):
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
+
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
+
def buildAttributes(self, attrs):
if attrs.get('external'):
self.external = attrs.get('external').value
if attrs.get('lineno'):
try:
self.lineno = int(attrs.get('lineno').value)
- except ValueError, exp:
+ except ValueError as exp:
raise ValueError('Bad integer attribute (lineno): %s' % exp)
if attrs.get('refkind'):
self.refkind = attrs.get('refkind').value
if attrs.get('refid'):
self.refid = attrs.get('refid').value
+
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'highlight':
+ nodeName_ == 'highlight':
obj_ = highlightType.factory()
obj_.build(child_)
self.highlight.append(obj_)
@@ -4239,6 +4820,7 @@ def buildChildren(self, child_, nodeName_):
class highlightType(GeneratedsSuper):
subclass = None
superclass = None
+
def __init__(self, classxx=None, sp=None, ref=None, mixedclass_=None, content_=None):
self.classxx = classxx
if mixedclass_ is None:
@@ -4249,6 +4831,7 @@ def __init__(self, classxx=None, sp=None, ref=None, mixedclass_=None, content_=N
self.content_ = []
else:
self.content_ = content_
+
def factory(*args_, **kwargs_):
if highlightType.subclass:
return highlightType.subclass(*args_, **kwargs_)
@@ -4265,36 +4848,44 @@ def add_ref(self, value): self.ref.append(value)
def insert_ref(self, index, value): self.ref[index] = value
def get_class(self): return self.classxx
def set_class(self, classxx): self.classxx = classxx
+
def export(self, outfile, level, namespace_='', name_='highlightType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
- self.exportAttributes(outfile, level, namespace_, name_='highlightType')
+ self.exportAttributes(outfile, level, namespace_,
+ name_='highlightType')
outfile.write('>')
self.exportChildren(outfile, level + 1, namespace_, name_)
outfile.write('</%s%s>\n' % (namespace_, name_))
+
def exportAttributes(self, outfile, level, namespace_='', name_='highlightType'):
if self.classxx is not None:
outfile.write(' class=%s' % (quote_attrib(self.classxx), ))
+
def exportChildren(self, outfile, level, namespace_='', name_='highlightType'):
for item_ in self.content_:
item_.export(outfile, level, item_.name, namespace_)
+
def hasContent_(self):
if (
self.sp is not None or
self.ref is not None
- ):
+ ):
return True
else:
return False
+
def exportLiteral(self, outfile, level, name_='highlightType'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
+
def exportLiteralAttributes(self, outfile, level, name_):
if self.classxx is not None:
showIndent(outfile, level)
outfile.write('classxx = "%s",\n' % (self.classxx,))
+
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('content_ = [\n')
@@ -4308,35 +4899,38 @@ def exportLiteralChildren(self, outfile, level, name_):
item_.exportLiteral(outfile, level, name_)
showIndent(outfile, level)
outfile.write('],\n')
+
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
+
def buildAttributes(self, attrs):
if attrs.get('class'):
self.classxx = attrs.get('class').value
+
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'sp':
+ nodeName_ == 'sp':
value_ = []
for text_ in child_.childNodes:
value_.append(text_.nodeValue)
valuestr_ = ''.join(value_)
obj_ = self.mixedclass_(MixedContainer.CategorySimple,
- MixedContainer.TypeString, 'sp', valuestr_)
+ MixedContainer.TypeString, 'sp', valuestr_)
self.content_.append(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'ref':
+ nodeName_ == 'ref':
childobj_ = docRefTextType.factory()
childobj_.build(child_)
obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
- MixedContainer.TypeNone, 'ref', childobj_)
+ MixedContainer.TypeNone, 'ref', childobj_)
self.content_.append(obj_)
elif child_.nodeType == Node.TEXT_NODE:
obj_ = self.mixedclass_(MixedContainer.CategoryText,
- MixedContainer.TypeNone, '', child_.nodeValue)
+ MixedContainer.TypeNone, '', child_.nodeValue)
self.content_.append(obj_)
# end class highlightType
@@ -4344,8 +4938,10 @@ def buildChildren(self, child_, nodeName_):
class sp(GeneratedsSuper):
subclass = None
superclass = None
+
def __init__(self, valueOf_=''):
self.valueOf_ = valueOf_
+
def factory(*args_, **kwargs_):
if sp.subclass:
return sp.subclass(*args_, **kwargs_)
@@ -4354,6 +4950,7 @@ def factory(*args_, **kwargs_):
factory = staticmethod(factory)
def getValueOf_(self): return self.valueOf_
def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
+
def export(self, outfile, level, namespace_='', name_='sp', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
@@ -4365,33 +4962,40 @@ def export(self, outfile, level, namespace_='', name_='sp', namespacedef_=''):
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write(' />\n')
+
def exportAttributes(self, outfile, level, namespace_='', name_='sp'):
pass
+
def exportChildren(self, outfile, level, namespace_='', name_='sp'):
- if self.valueOf_.find('![CDATA')>-1:
- value=quote_xml('%s' % self.valueOf_)
- value=value.replace('![CDATA','<![CDATA')
+ if self.valueOf_.find('![CDATA') > -1:
+ value = quote_xml('%s' % self.valueOf_)
+ value = value.replace('![CDATA', '<![CDATA')
outfile.write(value)
else:
outfile.write(quote_xml('%s' % self.valueOf_))
+
def hasContent_(self):
if (
self.valueOf_ is not None
- ):
+ ):
return True
else:
return False
+
def exportLiteral(self, outfile, level, name_='sp'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
+
def exportLiteralAttributes(self, outfile, level, name_):
pass
+
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
+
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
@@ -4399,19 +5003,22 @@ def build(self, node_):
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
+
def buildAttributes(self, attrs):
pass
+
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.TEXT_NODE:
self.valueOf_ += child_.nodeValue
elif child_.nodeType == Node.CDATA_SECTION_NODE:
- self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
+ self.valueOf_ += '![CDATA[' + child_.nodeValue + ']]'
# end class sp
class referenceType(GeneratedsSuper):
subclass = None
superclass = None
+
def __init__(self, endline=None, startline=None, refid=None, compoundref=None, valueOf_='', mixedclass_=None, content_=None):
self.endline = endline
self.startline = startline
@@ -4425,6 +5032,7 @@ def __init__(self, endline=None, startline=None, refid=None, compoundref=None, v
self.content_ = []
else:
self.content_ = content_
+
def factory(*args_, **kwargs_):
if referenceType.subclass:
return referenceType.subclass(*args_, **kwargs_)
@@ -4441,42 +5049,53 @@ def get_compoundref(self): return self.compoundref
def set_compoundref(self, compoundref): self.compoundref = compoundref
def getValueOf_(self): return self.valueOf_
def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
+
def export(self, outfile, level, namespace_='', name_='referenceType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
- self.exportAttributes(outfile, level, namespace_, name_='referenceType')
+ self.exportAttributes(outfile, level, namespace_,
+ name_='referenceType')
outfile.write('>')
self.exportChildren(outfile, level + 1, namespace_, name_)
outfile.write('</%s%s>\n' % (namespace_, name_))
+
def exportAttributes(self, outfile, level, namespace_='', name_='referenceType'):
if self.endline is not None:
- outfile.write(' endline="%s"' % self.format_integer(self.endline, input_name='endline'))
+ outfile.write(' endline="%s"' % self.format_integer(
+ self.endline, input_name='endline'))
if self.startline is not None:
- outfile.write(' startline="%s"' % self.format_integer(self.startline, input_name='startline'))
+ outfile.write(' startline="%s"' % self.format_integer(
+ self.startline, input_name='startline'))
if self.refid is not None:
- outfile.write(' refid=%s' % (self.format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), ))
+ outfile.write(' refid=%s' % (self.format_string(quote_attrib(
+ self.refid).encode(ExternalEncoding), input_name='refid'), ))
if self.compoundref is not None:
- outfile.write(' compoundref=%s' % (self.format_string(quote_attrib(self.compoundref).encode(ExternalEncoding), input_name='compoundref'), ))
+ outfile.write(' compoundref=%s' % (self.format_string(quote_attrib(
+ self.compoundref).encode(ExternalEncoding), input_name='compoundref'), ))
+
def exportChildren(self, outfile, level, namespace_='', name_='referenceType'):
- if self.valueOf_.find('![CDATA')>-1:
- value=quote_xml('%s' % self.valueOf_)
- value=value.replace('![CDATA','<![CDATA')
+ if self.valueOf_.find('![CDATA') > -1:
+ value = quote_xml('%s' % self.valueOf_)
+ value = value.replace('![CDATA', '<![CDATA')
outfile.write(value)
else:
outfile.write(quote_xml('%s' % self.valueOf_))
+
def hasContent_(self):
if (
self.valueOf_ is not None
- ):
+ ):
return True
else:
return False
+
def exportLiteral(self, outfile, level, name_='referenceType'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
+
def exportLiteralAttributes(self, outfile, level, name_):
if self.endline is not None:
showIndent(outfile, level)
@@ -4490,9 +5109,11 @@ def exportLiteralAttributes(self, outfile, level, name_):
if self.compoundref is not None:
showIndent(outfile, level)
outfile.write('compoundref = %s,\n' % (self.compoundref,))
+
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
+
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
@@ -4500,36 +5121,39 @@ def build(self, node_):
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
+
def buildAttributes(self, attrs):
if attrs.get('endline'):
try:
self.endline = int(attrs.get('endline').value)
- except ValueError, exp:
+ except ValueError as exp:
raise ValueError('Bad integer attribute (endline): %s' % exp)
if attrs.get('startline'):
try:
self.startline = int(attrs.get('startline').value)
- except ValueError, exp:
+ except ValueError as exp:
raise ValueError('Bad integer attribute (startline): %s' % exp)
if attrs.get('refid'):
self.refid = attrs.get('refid').value
if attrs.get('compoundref'):
self.compoundref = attrs.get('compoundref').value
+
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.TEXT_NODE:
obj_ = self.mixedclass_(MixedContainer.CategoryText,
- MixedContainer.TypeNone, '', child_.nodeValue)
+ MixedContainer.TypeNone, '', child_.nodeValue)
self.content_.append(obj_)
if child_.nodeType == Node.TEXT_NODE:
self.valueOf_ += child_.nodeValue
elif child_.nodeType == Node.CDATA_SECTION_NODE:
- self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
+ self.valueOf_ += '![CDATA[' + child_.nodeValue + ']]'
# end class referenceType
class locationType(GeneratedsSuper):
subclass = None
superclass = None
+
def __init__(self, bodystart=None, line=None, bodyend=None, bodyfile=None, file=None, valueOf_=''):
self.bodystart = bodystart
self.line = line
@@ -4537,6 +5161,7 @@ def __init__(self, bodystart=None, line=None, bodyend=None, bodyfile=None, file=
self.bodyfile = bodyfile
self.file = file
self.valueOf_ = valueOf_
+
def factory(*args_, **kwargs_):
if locationType.subclass:
return locationType.subclass(*args_, **kwargs_)
@@ -4555,6 +5180,7 @@ def get_file(self): return self.file
def set_file(self, file): self.file = file
def getValueOf_(self): return self.valueOf_
def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
+
def export(self, outfile, level, namespace_='', name_='locationType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
@@ -4566,37 +5192,47 @@ def export(self, outfile, level, namespace_='', name_='locationType', namespaced
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write(' />\n')
+
def exportAttributes(self, outfile, level, namespace_='', name_='locationType'):
if self.bodystart is not None:
- outfile.write(' bodystart="%s"' % self.format_integer(self.bodystart, input_name='bodystart'))
+ outfile.write(' bodystart="%s"' % self.format_integer(
+ self.bodystart, input_name='bodystart'))
if self.line is not None:
- outfile.write(' line="%s"' % self.format_integer(self.line, input_name='line'))
+ outfile.write(' line="%s"' % self.format_integer(
+ self.line, input_name='line'))
if self.bodyend is not None:
- outfile.write(' bodyend="%s"' % self.format_integer(self.bodyend, input_name='bodyend'))
+ outfile.write(' bodyend="%s"' % self.format_integer(
+ self.bodyend, input_name='bodyend'))
if self.bodyfile is not None:
- outfile.write(' bodyfile=%s' % (self.format_string(quote_attrib(self.bodyfile).encode(ExternalEncoding), input_name='bodyfile'), ))
+ outfile.write(' bodyfile=%s' % (self.format_string(quote_attrib(
+ self.bodyfile).encode(ExternalEncoding), input_name='bodyfile'), ))
if self.file is not None:
- outfile.write(' file=%s' % (self.format_string(quote_attrib(self.file).encode(ExternalEncoding), input_name='file'), ))
+ outfile.write(' file=%s' % (self.format_string(quote_attrib(
+ self.file).encode(ExternalEncoding), input_name='file'), ))
+
def exportChildren(self, outfile, level, namespace_='', name_='locationType'):
- if self.valueOf_.find('![CDATA')>-1:
- value=quote_xml('%s' % self.valueOf_)
- value=value.replace('![CDATA','<![CDATA')
+ if self.valueOf_.find('![CDATA') > -1:
+ value = quote_xml('%s' % self.valueOf_)
+ value = value.replace('![CDATA', '<![CDATA')
outfile.write(value)
else:
outfile.write(quote_xml('%s' % self.valueOf_))
+
def hasContent_(self):
if (
self.valueOf_ is not None
- ):
+ ):
return True
else:
return False
+
def exportLiteral(self, outfile, level, name_='locationType'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
+
def exportLiteralAttributes(self, outfile, level, name_):
if self.bodystart is not None:
showIndent(outfile, level)
@@ -4613,9 +5249,11 @@ def exportLiteralAttributes(self, outfile, level, name_):
if self.file is not None:
showIndent(outfile, level)
outfile.write('file = %s,\n' % (self.file,))
+
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
+
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
@@ -4623,37 +5261,40 @@ def build(self, node_):
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
+
def buildAttributes(self, attrs):
if attrs.get('bodystart'):
try:
self.bodystart = int(attrs.get('bodystart').value)
- except ValueError, exp:
+ except ValueError as exp:
raise ValueError('Bad integer attribute (bodystart): %s' % exp)
if attrs.get('line'):
try:
self.line = int(attrs.get('line').value)
- except ValueError, exp:
+ except ValueError as exp:
raise ValueError('Bad integer attribute (line): %s' % exp)
if attrs.get('bodyend'):
try:
self.bodyend = int(attrs.get('bodyend').value)
- except ValueError, exp:
+ except ValueError as exp:
raise ValueError('Bad integer attribute (bodyend): %s' % exp)
if attrs.get('bodyfile'):
self.bodyfile = attrs.get('bodyfile').value
if attrs.get('file'):
self.file = attrs.get('file').value
+
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.TEXT_NODE:
self.valueOf_ += child_.nodeValue
elif child_.nodeType == Node.CDATA_SECTION_NODE:
- self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
+ self.valueOf_ += '![CDATA[' + child_.nodeValue + ']]'
# end class locationType
class docSect1Type(GeneratedsSuper):
subclass = None
superclass = None
+
def __init__(self, id=None, title=None, para=None, sect2=None, internal=None, mixedclass_=None, content_=None):
self.id = id
if mixedclass_ is None:
@@ -4664,6 +5305,7 @@ def __init__(self, id=None, title=None, para=None, sect2=None, internal=None, mi
self.content_ = []
else:
self.content_ = content_
+
def factory(*args_, **kwargs_):
if docSect1Type.subclass:
return docSect1Type.subclass(*args_, **kwargs_)
@@ -4684,6 +5326,7 @@ def get_internal(self): return self.internal
def set_internal(self, internal): self.internal = internal
def get_id(self): return self.id
def set_id(self, id): self.id = id
+
def export(self, outfile, level, namespace_='', name_='docSect1Type', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
@@ -4691,31 +5334,38 @@ def export(self, outfile, level, namespace_='', name_='docSect1Type', namespaced
outfile.write('>')
self.exportChildren(outfile, level + 1, namespace_, name_)
outfile.write('</%s%s>\n' % (namespace_, name_))
+
def exportAttributes(self, outfile, level, namespace_='', name_='docSect1Type'):
if self.id is not None:
- outfile.write(' id=%s' % (self.format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), ))
+ outfile.write(' id=%s' % (self.format_string(quote_attrib(
+ self.id).encode(ExternalEncoding), input_name='id'), ))
+
def exportChildren(self, outfile, level, namespace_='', name_='docSect1Type'):
for item_ in self.content_:
item_.export(outfile, level, item_.name, namespace_)
+
def hasContent_(self):
if (
self.title is not None or
self.para is not None or
self.sect2 is not None or
self.internal is not None
- ):
+ ):
return True
else:
return False
+
def exportLiteral(self, outfile, level, name_='docSect1Type'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
+
def exportLiteralAttributes(self, outfile, level, name_):
if self.id is not None:
showIndent(outfile, level)
outfile.write('id = %s,\n' % (self.id,))
+
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('content_ = [\n')
@@ -4741,47 +5391,50 @@ def exportLiteralChildren(self, outfile, level, name_):
item_.exportLiteral(outfile, level, name_)
showIndent(outfile, level)
outfile.write('],\n')
+
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
+
def buildAttributes(self, attrs):
if attrs.get('id'):
self.id = attrs.get('id').value
+
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'title':
+ nodeName_ == 'title':
childobj_ = docTitleType.factory()
childobj_.build(child_)
obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
- MixedContainer.TypeNone, 'title', childobj_)
+ MixedContainer.TypeNone, 'title', childobj_)
self.content_.append(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'para':
+ nodeName_ == 'para':
childobj_ = docParaType.factory()
childobj_.build(child_)
obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
- MixedContainer.TypeNone, 'para', childobj_)
+ MixedContainer.TypeNone, 'para', childobj_)
self.content_.append(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'sect2':
+ nodeName_ == 'sect2':
childobj_ = docSect2Type.factory()
childobj_.build(child_)
obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
- MixedContainer.TypeNone, 'sect2', childobj_)
+ MixedContainer.TypeNone, 'sect2', childobj_)
self.content_.append(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'internal':
+ nodeName_ == 'internal':
childobj_ = docInternalS1Type.factory()
childobj_.build(child_)
obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
- MixedContainer.TypeNone, 'internal', childobj_)
+ MixedContainer.TypeNone, 'internal', childobj_)
self.content_.append(obj_)
elif child_.nodeType == Node.TEXT_NODE:
obj_ = self.mixedclass_(MixedContainer.CategoryText,
- MixedContainer.TypeNone, '', child_.nodeValue)
+ MixedContainer.TypeNone, '', child_.nodeValue)
self.content_.append(obj_)
# end class docSect1Type
@@ -4789,6 +5442,7 @@ def buildChildren(self, child_, nodeName_):
class docSect2Type(GeneratedsSuper):
subclass = None
superclass = None
+
def __init__(self, id=None, title=None, para=None, sect3=None, internal=None, mixedclass_=None, content_=None):
self.id = id
if mixedclass_ is None:
@@ -4799,6 +5453,7 @@ def __init__(self, id=None, title=None, para=None, sect3=None, internal=None, mi
self.content_ = []
else:
self.content_ = content_
+
def factory(*args_, **kwargs_):
if docSect2Type.subclass:
return docSect2Type.subclass(*args_, **kwargs_)
@@ -4819,6 +5474,7 @@ def get_internal(self): return self.internal
def set_internal(self, internal): self.internal = internal
def get_id(self): return self.id
def set_id(self, id): self.id = id
+
def export(self, outfile, level, namespace_='', name_='docSect2Type', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
@@ -4826,31 +5482,38 @@ def export(self, outfile, level, namespace_='', name_='docSect2Type', namespaced
outfile.write('>')
self.exportChildren(outfile, level + 1, namespace_, name_)
outfile.write('</%s%s>\n' % (namespace_, name_))
+
def exportAttributes(self, outfile, level, namespace_='', name_='docSect2Type'):
if self.id is not None:
- outfile.write(' id=%s' % (self.format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), ))
+ outfile.write(' id=%s' % (self.format_string(quote_attrib(
+ self.id).encode(ExternalEncoding), input_name='id'), ))
+
def exportChildren(self, outfile, level, namespace_='', name_='docSect2Type'):
for item_ in self.content_:
item_.export(outfile, level, item_.name, namespace_)
+
def hasContent_(self):
if (
self.title is not None or
self.para is not None or
self.sect3 is not None or
self.internal is not None
- ):
+ ):
return True
else:
return False
+
def exportLiteral(self, outfile, level, name_='docSect2Type'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
+
def exportLiteralAttributes(self, outfile, level, name_):
if self.id is not None:
showIndent(outfile, level)
outfile.write('id = %s,\n' % (self.id,))
+
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('content_ = [\n')
@@ -4876,47 +5539,50 @@ def exportLiteralChildren(self, outfile, level, name_):
item_.exportLiteral(outfile, level, name_)
showIndent(outfile, level)
outfile.write('],\n')
+
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
+
def buildAttributes(self, attrs):
if attrs.get('id'):
self.id = attrs.get('id').value
+
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'title':
+ nodeName_ == 'title':
childobj_ = docTitleType.factory()
childobj_.build(child_)
obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
- MixedContainer.TypeNone, 'title', childobj_)
+ MixedContainer.TypeNone, 'title', childobj_)
self.content_.append(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'para':
+ nodeName_ == 'para':
childobj_ = docParaType.factory()
childobj_.build(child_)
obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
- MixedContainer.TypeNone, 'para', childobj_)
+ MixedContainer.TypeNone, 'para', childobj_)
self.content_.append(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'sect3':
+ nodeName_ == 'sect3':
childobj_ = docSect3Type.factory()
childobj_.build(child_)
obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
- MixedContainer.TypeNone, 'sect3', childobj_)
+ MixedContainer.TypeNone, 'sect3', childobj_)
self.content_.append(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'internal':
+ nodeName_ == 'internal':
childobj_ = docInternalS2Type.factory()
childobj_.build(child_)
obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
- MixedContainer.TypeNone, 'internal', childobj_)
+ MixedContainer.TypeNone, 'internal', childobj_)
self.content_.append(obj_)
elif child_.nodeType == Node.TEXT_NODE:
obj_ = self.mixedclass_(MixedContainer.CategoryText,
- MixedContainer.TypeNone, '', child_.nodeValue)
+ MixedContainer.TypeNone, '', child_.nodeValue)
self.content_.append(obj_)
# end class docSect2Type
@@ -4924,6 +5590,7 @@ def buildChildren(self, child_, nodeName_):
class docSect3Type(GeneratedsSuper):
subclass = None
superclass = None
+
def __init__(self, id=None, title=None, para=None, sect4=None, internal=None, mixedclass_=None, content_=None):
self.id = id
if mixedclass_ is None:
@@ -4934,6 +5601,7 @@ def __init__(self, id=None, title=None, para=None, sect4=None, internal=None, mi
self.content_ = []
else:
self.content_ = content_
+
def factory(*args_, **kwargs_):
if docSect3Type.subclass:
return docSect3Type.subclass(*args_, **kwargs_)
@@ -4954,6 +5622,7 @@ def get_internal(self): return self.internal
def set_internal(self, internal): self.internal = internal
def get_id(self): return self.id
def set_id(self, id): self.id = id
+
def export(self, outfile, level, namespace_='', name_='docSect3Type', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
@@ -4961,31 +5630,38 @@ def export(self, outfile, level, namespace_='', name_='docSect3Type', namespaced
outfile.write('>')
self.exportChildren(outfile, level + 1, namespace_, name_)
outfile.write('</%s%s>\n' % (namespace_, name_))
+
def exportAttributes(self, outfile, level, namespace_='', name_='docSect3Type'):
if self.id is not None:
- outfile.write(' id=%s' % (self.format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), ))
+ outfile.write(' id=%s' % (self.format_string(quote_attrib(
+ self.id).encode(ExternalEncoding), input_name='id'), ))
+
def exportChildren(self, outfile, level, namespace_='', name_='docSect3Type'):
for item_ in self.content_:
item_.export(outfile, level, item_.name, namespace_)
+
def hasContent_(self):
if (
self.title is not None or
self.para is not None or
self.sect4 is not None or
self.internal is not None
- ):
+ ):
return True
else:
return False
+
def exportLiteral(self, outfile, level, name_='docSect3Type'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
+
def exportLiteralAttributes(self, outfile, level, name_):
if self.id is not None:
showIndent(outfile, level)
outfile.write('id = %s,\n' % (self.id,))
+
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('content_ = [\n')
@@ -5011,47 +5687,50 @@ def exportLiteralChildren(self, outfile, level, name_):
item_.exportLiteral(outfile, level, name_)
showIndent(outfile, level)
outfile.write('],\n')
+
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
+
def buildAttributes(self, attrs):
if attrs.get('id'):
self.id = attrs.get('id').value
+
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'title':
+ nodeName_ == 'title':
childobj_ = docTitleType.factory()
childobj_.build(child_)
obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
- MixedContainer.TypeNone, 'title', childobj_)
+ MixedContainer.TypeNone, 'title', childobj_)
self.content_.append(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'para':
+ nodeName_ == 'para':
childobj_ = docParaType.factory()
childobj_.build(child_)
obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
- MixedContainer.TypeNone, 'para', childobj_)
+ MixedContainer.TypeNone, 'para', childobj_)
self.content_.append(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'sect4':
+ nodeName_ == 'sect4':
childobj_ = docSect4Type.factory()
childobj_.build(child_)
obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
- MixedContainer.TypeNone, 'sect4', childobj_)
+ MixedContainer.TypeNone, 'sect4', childobj_)
self.content_.append(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'internal':
+ nodeName_ == 'internal':
childobj_ = docInternalS3Type.factory()
childobj_.build(child_)
obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
- MixedContainer.TypeNone, 'internal', childobj_)
+ MixedContainer.TypeNone, 'internal', childobj_)
self.content_.append(obj_)
elif child_.nodeType == Node.TEXT_NODE:
obj_ = self.mixedclass_(MixedContainer.CategoryText,
- MixedContainer.TypeNone, '', child_.nodeValue)
+ MixedContainer.TypeNone, '', child_.nodeValue)
self.content_.append(obj_)
# end class docSect3Type
@@ -5059,6 +5738,7 @@ def buildChildren(self, child_, nodeName_):
class docSect4Type(GeneratedsSuper):
subclass = None
superclass = None
+
def __init__(self, id=None, title=None, para=None, internal=None, mixedclass_=None, content_=None):
self.id = id
if mixedclass_ is None:
@@ -5069,6 +5749,7 @@ def __init__(self, id=None, title=None, para=None, internal=None, mixedclass_=No
self.content_ = []
else:
self.content_ = content_
+
def factory(*args_, **kwargs_):
if docSect4Type.subclass:
return docSect4Type.subclass(*args_, **kwargs_)
@@ -5085,6 +5766,7 @@ def get_internal(self): return self.internal
def set_internal(self, internal): self.internal = internal
def get_id(self): return self.id
def set_id(self, id): self.id = id
+
def export(self, outfile, level, namespace_='', name_='docSect4Type', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
@@ -5092,30 +5774,37 @@ def export(self, outfile, level, namespace_='', name_='docSect4Type', namespaced
outfile.write('>')
self.exportChildren(outfile, level + 1, namespace_, name_)
outfile.write('</%s%s>\n' % (namespace_, name_))
+
def exportAttributes(self, outfile, level, namespace_='', name_='docSect4Type'):
if self.id is not None:
- outfile.write(' id=%s' % (self.format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), ))
+ outfile.write(' id=%s' % (self.format_string(quote_attrib(
+ self.id).encode(ExternalEncoding), input_name='id'), ))
+
def exportChildren(self, outfile, level, namespace_='', name_='docSect4Type'):
for item_ in self.content_:
item_.export(outfile, level, item_.name, namespace_)
+
def hasContent_(self):
if (
self.title is not None or
self.para is not None or
self.internal is not None
- ):
+ ):
return True
else:
return False
+
def exportLiteral(self, outfile, level, name_='docSect4Type'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
+
def exportLiteralAttributes(self, outfile, level, name_):
if self.id is not None:
showIndent(outfile, level)
outfile.write('id = %s,\n' % (self.id,))
+
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('content_ = [\n')
@@ -5135,40 +5824,43 @@ def exportLiteralChildren(self, outfile, level, name_):
item_.exportLiteral(outfile, level, name_)
showIndent(outfile, level)
outfile.write('],\n')
+
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
+
def buildAttributes(self, attrs):
if attrs.get('id'):
self.id = attrs.get('id').value
+
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'title':
+ nodeName_ == 'title':
childobj_ = docTitleType.factory()
childobj_.build(child_)
obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
- MixedContainer.TypeNone, 'title', childobj_)
+ MixedContainer.TypeNone, 'title', childobj_)
self.content_.append(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'para':
+ nodeName_ == 'para':
childobj_ = docParaType.factory()
childobj_.build(child_)
obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
- MixedContainer.TypeNone, 'para', childobj_)
+ MixedContainer.TypeNone, 'para', childobj_)
self.content_.append(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'internal':
+ nodeName_ == 'internal':
childobj_ = docInternalS4Type.factory()
childobj_.build(child_)
obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
- MixedContainer.TypeNone, 'internal', childobj_)
+ MixedContainer.TypeNone, 'internal', childobj_)
self.content_.append(obj_)
elif child_.nodeType == Node.TEXT_NODE:
obj_ = self.mixedclass_(MixedContainer.CategoryText,
- MixedContainer.TypeNone, '', child_.nodeValue)
+ MixedContainer.TypeNone, '', child_.nodeValue)
self.content_.append(obj_)
# end class docSect4Type
@@ -5176,6 +5868,7 @@ def buildChildren(self, child_, nodeName_):
class docInternalType(GeneratedsSuper):
subclass = None
superclass = None
+
def __init__(self, para=None, sect1=None, mixedclass_=None, content_=None):
if mixedclass_ is None:
self.mixedclass_ = MixedContainer
@@ -5185,6 +5878,7 @@ def __init__(self, para=None, sect1=None, mixedclass_=None, content_=None):
self.content_ = []
else:
self.content_ = content_
+
def factory(*args_, **kwargs_):
if docInternalType.subclass:
return docInternalType.subclass(*args_, **kwargs_)
@@ -5199,33 +5893,41 @@ def get_sect1(self): return self.sect1
def set_sect1(self, sect1): self.sect1 = sect1
def add_sect1(self, value): self.sect1.append(value)
def insert_sect1(self, index, value): self.sect1[index] = value
+
def export(self, outfile, level, namespace_='', name_='docInternalType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
- self.exportAttributes(outfile, level, namespace_, name_='docInternalType')
+ self.exportAttributes(outfile, level, namespace_,
+ name_='docInternalType')
outfile.write('>')
self.exportChildren(outfile, level + 1, namespace_, name_)
outfile.write('</%s%s>\n' % (namespace_, name_))
+
def exportAttributes(self, outfile, level, namespace_='', name_='docInternalType'):
pass
+
def exportChildren(self, outfile, level, namespace_='', name_='docInternalType'):
for item_ in self.content_:
item_.export(outfile, level, item_.name, namespace_)
+
def hasContent_(self):
if (
self.para is not None or
self.sect1 is not None
- ):
+ ):
return True
else:
return False
+
def exportLiteral(self, outfile, level, name_='docInternalType'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
+
def exportLiteralAttributes(self, outfile, level, name_):
pass
+
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('content_ = [\n')
@@ -5239,32 +5941,35 @@ def exportLiteralChildren(self, outfile, level, name_):
item_.exportLiteral(outfile, level, name_)
showIndent(outfile, level)
outfile.write('],\n')
+
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
+
def buildAttributes(self, attrs):
pass
+
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'para':
+ nodeName_ == 'para':
childobj_ = docParaType.factory()
childobj_.build(child_)
obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
- MixedContainer.TypeNone, 'para', childobj_)
+ MixedContainer.TypeNone, 'para', childobj_)
self.content_.append(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'sect1':
+ nodeName_ == 'sect1':
childobj_ = docSect1Type.factory()
childobj_.build(child_)
obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
- MixedContainer.TypeNone, 'sect1', childobj_)
+ MixedContainer.TypeNone, 'sect1', childobj_)
self.content_.append(obj_)
elif child_.nodeType == Node.TEXT_NODE:
obj_ = self.mixedclass_(MixedContainer.CategoryText,
- MixedContainer.TypeNone, '', child_.nodeValue)
+ MixedContainer.TypeNone, '', child_.nodeValue)
self.content_.append(obj_)
# end class docInternalType
@@ -5272,6 +5977,7 @@ def buildChildren(self, child_, nodeName_):
class docInternalS1Type(GeneratedsSuper):
subclass = None
superclass = None
+
def __init__(self, para=None, sect2=None, mixedclass_=None, content_=None):
if mixedclass_ is None:
self.mixedclass_ = MixedContainer
@@ -5281,6 +5987,7 @@ def __init__(self, para=None, sect2=None, mixedclass_=None, content_=None):
self.content_ = []
else:
self.content_ = content_
+
def factory(*args_, **kwargs_):
if docInternalS1Type.subclass:
return docInternalS1Type.subclass(*args_, **kwargs_)
@@ -5295,33 +6002,41 @@ def get_sect2(self): return self.sect2
def set_sect2(self, sect2): self.sect2 = sect2
def add_sect2(self, value): self.sect2.append(value)
def insert_sect2(self, index, value): self.sect2[index] = value
+
def export(self, outfile, level, namespace_='', name_='docInternalS1Type', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
- self.exportAttributes(outfile, level, namespace_, name_='docInternalS1Type')
+ self.exportAttributes(outfile, level, namespace_,
+ name_='docInternalS1Type')
outfile.write('>')
self.exportChildren(outfile, level + 1, namespace_, name_)
outfile.write('</%s%s>\n' % (namespace_, name_))
+
def exportAttributes(self, outfile, level, namespace_='', name_='docInternalS1Type'):
pass
+
def exportChildren(self, outfile, level, namespace_='', name_='docInternalS1Type'):
for item_ in self.content_:
item_.export(outfile, level, item_.name, namespace_)
+
def hasContent_(self):
if (
self.para is not None or
self.sect2 is not None
- ):
+ ):
return True
else:
return False
+
def exportLiteral(self, outfile, level, name_='docInternalS1Type'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
+
def exportLiteralAttributes(self, outfile, level, name_):
pass
+
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('content_ = [\n')
@@ -5335,32 +6050,35 @@ def exportLiteralChildren(self, outfile, level, name_):
item_.exportLiteral(outfile, level, name_)
showIndent(outfile, level)
outfile.write('],\n')
+
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
+
def buildAttributes(self, attrs):
pass
+
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'para':
+ nodeName_ == 'para':
childobj_ = docParaType.factory()
childobj_.build(child_)
obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
- MixedContainer.TypeNone, 'para', childobj_)
+ MixedContainer.TypeNone, 'para', childobj_)
self.content_.append(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'sect2':
+ nodeName_ == 'sect2':
childobj_ = docSect2Type.factory()
childobj_.build(child_)
obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
- MixedContainer.TypeNone, 'sect2', childobj_)
+ MixedContainer.TypeNone, 'sect2', childobj_)
self.content_.append(obj_)
elif child_.nodeType == Node.TEXT_NODE:
obj_ = self.mixedclass_(MixedContainer.CategoryText,
- MixedContainer.TypeNone, '', child_.nodeValue)
+ MixedContainer.TypeNone, '', child_.nodeValue)
self.content_.append(obj_)
# end class docInternalS1Type
@@ -5368,6 +6086,7 @@ def buildChildren(self, child_, nodeName_):
class docInternalS2Type(GeneratedsSuper):
subclass = None
superclass = None
+
def __init__(self, para=None, sect3=None, mixedclass_=None, content_=None):
if mixedclass_ is None:
self.mixedclass_ = MixedContainer
@@ -5377,6 +6096,7 @@ def __init__(self, para=None, sect3=None, mixedclass_=None, content_=None):
self.content_ = []
else:
self.content_ = content_
+
def factory(*args_, **kwargs_):
if docInternalS2Type.subclass:
return docInternalS2Type.subclass(*args_, **kwargs_)
@@ -5391,33 +6111,41 @@ def get_sect3(self): return self.sect3
def set_sect3(self, sect3): self.sect3 = sect3
def add_sect3(self, value): self.sect3.append(value)
def insert_sect3(self, index, value): self.sect3[index] = value
+
def export(self, outfile, level, namespace_='', name_='docInternalS2Type', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
- self.exportAttributes(outfile, level, namespace_, name_='docInternalS2Type')
+ self.exportAttributes(outfile, level, namespace_,
+ name_='docInternalS2Type')
outfile.write('>')
self.exportChildren(outfile, level + 1, namespace_, name_)
outfile.write('</%s%s>\n' % (namespace_, name_))
+
def exportAttributes(self, outfile, level, namespace_='', name_='docInternalS2Type'):
pass
+
def exportChildren(self, outfile, level, namespace_='', name_='docInternalS2Type'):
for item_ in self.content_:
item_.export(outfile, level, item_.name, namespace_)
+
def hasContent_(self):
if (
self.para is not None or
self.sect3 is not None
- ):
+ ):
return True
else:
return False
+
def exportLiteral(self, outfile, level, name_='docInternalS2Type'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
+
def exportLiteralAttributes(self, outfile, level, name_):
pass
+
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('content_ = [\n')
@@ -5431,32 +6159,35 @@ def exportLiteralChildren(self, outfile, level, name_):
item_.exportLiteral(outfile, level, name_)
showIndent(outfile, level)
outfile.write('],\n')
+
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
+
def buildAttributes(self, attrs):
pass
+
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'para':
+ nodeName_ == 'para':
childobj_ = docParaType.factory()
childobj_.build(child_)
obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
- MixedContainer.TypeNone, 'para', childobj_)
+ MixedContainer.TypeNone, 'para', childobj_)
self.content_.append(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'sect3':
+ nodeName_ == 'sect3':
childobj_ = docSect3Type.factory()
childobj_.build(child_)
obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
- MixedContainer.TypeNone, 'sect3', childobj_)
+ MixedContainer.TypeNone, 'sect3', childobj_)
self.content_.append(obj_)
elif child_.nodeType == Node.TEXT_NODE:
obj_ = self.mixedclass_(MixedContainer.CategoryText,
- MixedContainer.TypeNone, '', child_.nodeValue)
+ MixedContainer.TypeNone, '', child_.nodeValue)
self.content_.append(obj_)
# end class docInternalS2Type
@@ -5464,6 +6195,7 @@ def buildChildren(self, child_, nodeName_):
class docInternalS3Type(GeneratedsSuper):
subclass = None
superclass = None
+
def __init__(self, para=None, sect3=None, mixedclass_=None, content_=None):
if mixedclass_ is None:
self.mixedclass_ = MixedContainer
@@ -5473,6 +6205,7 @@ def __init__(self, para=None, sect3=None, mixedclass_=None, content_=None):
self.content_ = []
else:
self.content_ = content_
+
def factory(*args_, **kwargs_):
if docInternalS3Type.subclass:
return docInternalS3Type.subclass(*args_, **kwargs_)
@@ -5487,33 +6220,41 @@ def get_sect3(self): return self.sect3
def set_sect3(self, sect3): self.sect3 = sect3
def add_sect3(self, value): self.sect3.append(value)
def insert_sect3(self, index, value): self.sect3[index] = value
+
def export(self, outfile, level, namespace_='', name_='docInternalS3Type', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
- self.exportAttributes(outfile, level, namespace_, name_='docInternalS3Type')
+ self.exportAttributes(outfile, level, namespace_,
+ name_='docInternalS3Type')
outfile.write('>')
self.exportChildren(outfile, level + 1, namespace_, name_)
outfile.write('</%s%s>\n' % (namespace_, name_))
+
def exportAttributes(self, outfile, level, namespace_='', name_='docInternalS3Type'):
pass
+
def exportChildren(self, outfile, level, namespace_='', name_='docInternalS3Type'):
for item_ in self.content_:
item_.export(outfile, level, item_.name, namespace_)
+
def hasContent_(self):
if (
self.para is not None or
self.sect3 is not None
- ):
+ ):
return True
else:
return False
+
def exportLiteral(self, outfile, level, name_='docInternalS3Type'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
+
def exportLiteralAttributes(self, outfile, level, name_):
pass
+
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('content_ = [\n')
@@ -5527,32 +6268,35 @@ def exportLiteralChildren(self, outfile, level, name_):
item_.exportLiteral(outfile, level, name_)
showIndent(outfile, level)
outfile.write('],\n')
+
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
+
def buildAttributes(self, attrs):
pass
+
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'para':
+ nodeName_ == 'para':
childobj_ = docParaType.factory()
childobj_.build(child_)
obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
- MixedContainer.TypeNone, 'para', childobj_)
+ MixedContainer.TypeNone, 'para', childobj_)
self.content_.append(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'sect3':
+ nodeName_ == 'sect3':
childobj_ = docSect4Type.factory()
childobj_.build(child_)
obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
- MixedContainer.TypeNone, 'sect3', childobj_)
+ MixedContainer.TypeNone, 'sect3', childobj_)
self.content_.append(obj_)
elif child_.nodeType == Node.TEXT_NODE:
obj_ = self.mixedclass_(MixedContainer.CategoryText,
- MixedContainer.TypeNone, '', child_.nodeValue)
+ MixedContainer.TypeNone, '', child_.nodeValue)
self.content_.append(obj_)
# end class docInternalS3Type
@@ -5560,6 +6304,7 @@ def buildChildren(self, child_, nodeName_):
class docInternalS4Type(GeneratedsSuper):
subclass = None
superclass = None
+
def __init__(self, para=None, mixedclass_=None, content_=None):
if mixedclass_ is None:
self.mixedclass_ = MixedContainer
@@ -5569,6 +6314,7 @@ def __init__(self, para=None, mixedclass_=None, content_=None):
self.content_ = []
else:
self.content_ = content_
+
def factory(*args_, **kwargs_):
if docInternalS4Type.subclass:
return docInternalS4Type.subclass(*args_, **kwargs_)
@@ -5579,32 +6325,40 @@ def get_para(self): return self.para
def set_para(self, para): self.para = para
def add_para(self, value): self.para.append(value)
def insert_para(self, index, value): self.para[index] = value
+
def export(self, outfile, level, namespace_='', name_='docInternalS4Type', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
- self.exportAttributes(outfile, level, namespace_, name_='docInternalS4Type')
+ self.exportAttributes(outfile, level, namespace_,
+ name_='docInternalS4Type')
outfile.write('>')
self.exportChildren(outfile, level + 1, namespace_, name_)
outfile.write('</%s%s>\n' % (namespace_, name_))
+
def exportAttributes(self, outfile, level, namespace_='', name_='docInternalS4Type'):
pass
+
def exportChildren(self, outfile, level, namespace_='', name_='docInternalS4Type'):
for item_ in self.content_:
item_.export(outfile, level, item_.name, namespace_)
+
def hasContent_(self):
if (
self.para is not None
- ):
+ ):
return True
else:
return False
+
def exportLiteral(self, outfile, level, name_='docInternalS4Type'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
+
def exportLiteralAttributes(self, outfile, level, name_):
pass
+
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('content_ = [\n')
@@ -5612,25 +6366,28 @@ def exportLiteralChildren(self, outfile, level, name_):
item_.exportLiteral(outfile, level, name_)
showIndent(outfile, level)
outfile.write('],\n')
+
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
+
def buildAttributes(self, attrs):
pass
+
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'para':
+ nodeName_ == 'para':
childobj_ = docParaType.factory()
childobj_.build(child_)
obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
- MixedContainer.TypeNone, 'para', childobj_)
+ MixedContainer.TypeNone, 'para', childobj_)
self.content_.append(obj_)
elif child_.nodeType == Node.TEXT_NODE:
obj_ = self.mixedclass_(MixedContainer.CategoryText,
- MixedContainer.TypeNone, '', child_.nodeValue)
+ MixedContainer.TypeNone, '', child_.nodeValue)
self.content_.append(obj_)
# end class docInternalS4Type
@@ -5638,6 +6395,7 @@ def buildChildren(self, child_, nodeName_):
class docTitleType(GeneratedsSuper):
subclass = None
superclass = None
+
def __init__(self, valueOf_='', mixedclass_=None, content_=None):
if mixedclass_ is None:
self.mixedclass_ = MixedContainer
@@ -5647,6 +6405,7 @@ def __init__(self, valueOf_='', mixedclass_=None, content_=None):
self.content_ = []
else:
self.content_ = content_
+
def factory(*args_, **kwargs_):
if docTitleType.subclass:
return docTitleType.subclass(*args_, **kwargs_)
@@ -5655,6 +6414,7 @@ def factory(*args_, **kwargs_):
factory = staticmethod(factory)
def getValueOf_(self): return self.valueOf_
def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
+
def export(self, outfile, level, namespace_='', name_='docTitleType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
@@ -5662,33 +6422,40 @@ def export(self, outfile, level, namespace_='', name_='docTitleType', namespaced
outfile.write('>')
self.exportChildren(outfile, level + 1, namespace_, name_)
outfile.write('</%s%s>\n' % (namespace_, name_))
+
def exportAttributes(self, outfile, level, namespace_='', name_='docTitleType'):
pass
+
def exportChildren(self, outfile, level, namespace_='', name_='docTitleType'):
- if self.valueOf_.find('![CDATA')>-1:
- value=quote_xml('%s' % self.valueOf_)
- value=value.replace('![CDATA','<![CDATA')
- value=value.replace(']]',']]>')
+ if self.valueOf_.find('![CDATA') > -1:
+ value = quote_xml('%s' % self.valueOf_)
+ value = value.replace('![CDATA', '<![CDATA')
+ value = value.replace(']]', ']]>')
outfile.write(value)
else:
outfile.write(quote_xml('%s' % self.valueOf_))
+
def hasContent_(self):
if (
self.valueOf_ is not None
- ):
+ ):
return True
else:
return False
+
def exportLiteral(self, outfile, level, name_='docTitleType'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
+
def exportLiteralAttributes(self, outfile, level, name_):
pass
+
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
+
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
@@ -5696,23 +6463,26 @@ def build(self, node_):
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
+
def buildAttributes(self, attrs):
pass
+
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.TEXT_NODE:
obj_ = self.mixedclass_(MixedContainer.CategoryText,
- MixedContainer.TypeNone, '', child_.nodeValue)
+ MixedContainer.TypeNone, '', child_.nodeValue)
self.content_.append(obj_)
if child_.nodeType == Node.TEXT_NODE:
self.valueOf_ += child_.nodeValue
elif child_.nodeType == Node.CDATA_SECTION_NODE:
- self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
+ self.valueOf_ += '![CDATA[' + child_.nodeValue + ']]'
# end class docTitleType
class docParaType(GeneratedsSuper):
subclass = None
superclass = None
+
def __init__(self, valueOf_='', mixedclass_=None, content_=None):
if mixedclass_ is None:
self.mixedclass_ = MixedContainer
@@ -5722,6 +6492,7 @@ def __init__(self, valueOf_='', mixedclass_=None, content_=None):
self.content_ = []
else:
self.content_ = content_
+
def factory(*args_, **kwargs_):
if docParaType.subclass:
return docParaType.subclass(*args_, **kwargs_)
@@ -5730,6 +6501,7 @@ def factory(*args_, **kwargs_):
factory = staticmethod(factory)
def getValueOf_(self): return self.valueOf_
def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
+
def export(self, outfile, level, namespace_='', name_='docParaType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
@@ -5737,33 +6509,40 @@ def export(self, outfile, level, namespace_='', name_='docParaType', namespacede
outfile.write('>')
self.exportChildren(outfile, level + 1, namespace_, name_)
outfile.write('</%s%s>\n' % (namespace_, name_))
+
def exportAttributes(self, outfile, level, namespace_='', name_='docParaType'):
pass
+
def exportChildren(self, outfile, level, namespace_='', name_='docParaType'):
- if self.valueOf_.find('![CDATA')>-1:
- value=quote_xml('%s' % self.valueOf_)
- value=value.replace('![CDATA','<![CDATA')
- value=value.replace(']]',']]>')
+ if self.valueOf_.find('![CDATA') > -1:
+ value = quote_xml('%s' % self.valueOf_)
+ value = value.replace('![CDATA', '<![CDATA')
+ value = value.replace(']]', ']]>')
outfile.write(value)
else:
outfile.write(quote_xml('%s' % self.valueOf_))
+
def hasContent_(self):
if (
self.valueOf_ is not None
- ):
+ ):
return True
else:
return False
+
def exportLiteral(self, outfile, level, name_='docParaType'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
+
def exportLiteralAttributes(self, outfile, level, name_):
pass
+
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
+
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
@@ -5771,23 +6550,26 @@ def build(self, node_):
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
+
def buildAttributes(self, attrs):
pass
+
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.TEXT_NODE:
obj_ = self.mixedclass_(MixedContainer.CategoryText,
- MixedContainer.TypeNone, '', child_.nodeValue)
+ MixedContainer.TypeNone, '', child_.nodeValue)
self.content_.append(obj_)
if child_.nodeType == Node.TEXT_NODE:
self.valueOf_ += child_.nodeValue
elif child_.nodeType == Node.CDATA_SECTION_NODE:
- self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
+ self.valueOf_ += '![CDATA[' + child_.nodeValue + ']]'
# end class docParaType
class docMarkupType(GeneratedsSuper):
subclass = None
superclass = None
+
def __init__(self, valueOf_='', mixedclass_=None, content_=None):
if mixedclass_ is None:
self.mixedclass_ = MixedContainer
@@ -5797,6 +6579,7 @@ def __init__(self, valueOf_='', mixedclass_=None, content_=None):
self.content_ = []
else:
self.content_ = content_
+
def factory(*args_, **kwargs_):
if docMarkupType.subclass:
return docMarkupType.subclass(*args_, **kwargs_)
@@ -5805,40 +6588,49 @@ def factory(*args_, **kwargs_):
factory = staticmethod(factory)
def getValueOf_(self): return self.valueOf_
def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
+
def export(self, outfile, level, namespace_='', name_='docMarkupType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
- self.exportAttributes(outfile, level, namespace_, name_='docMarkupType')
+ self.exportAttributes(outfile, level, namespace_,
+ name_='docMarkupType')
outfile.write('>')
self.exportChildren(outfile, level + 1, namespace_, name_)
outfile.write('</%s%s>\n' % (namespace_, name_))
+
def exportAttributes(self, outfile, level, namespace_='', name_='docMarkupType'):
pass
+
def exportChildren(self, outfile, level, namespace_='', name_='docMarkupType'):
- if self.valueOf_.find('![CDATA')>-1:
- value=quote_xml('%s' % self.valueOf_)
- value=value.replace('![CDATA','<![CDATA')
- value=value.replace(']]',']]>')
+ if self.valueOf_.find('![CDATA') > -1:
+ value = quote_xml('%s' % self.valueOf_)
+ value = value.replace('![CDATA', '<![CDATA')
+ value = value.replace(']]', ']]>')
outfile.write(value)
else:
outfile.write(quote_xml('%s' % self.valueOf_))
+
def hasContent_(self):
if (
self.valueOf_ is not None
- ):
+ ):
return True
else:
return False
+
def exportLiteral(self, outfile, level, name_='docMarkupType'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
+
def exportLiteralAttributes(self, outfile, level, name_):
pass
+
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
+
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
@@ -5846,23 +6638,26 @@ def build(self, node_):
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
+
def buildAttributes(self, attrs):
pass
+
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.TEXT_NODE:
obj_ = self.mixedclass_(MixedContainer.CategoryText,
- MixedContainer.TypeNone, '', child_.nodeValue)
+ MixedContainer.TypeNone, '', child_.nodeValue)
self.content_.append(obj_)
if child_.nodeType == Node.TEXT_NODE:
self.valueOf_ += child_.nodeValue
elif child_.nodeType == Node.CDATA_SECTION_NODE:
- self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
+ self.valueOf_ += '![CDATA[' + child_.nodeValue + ']]'
# end class docMarkupType
class docURLLink(GeneratedsSuper):
subclass = None
superclass = None
+
def __init__(self, url=None, valueOf_='', mixedclass_=None, content_=None):
self.url = url
if mixedclass_ is None:
@@ -5873,6 +6668,7 @@ def __init__(self, url=None, valueOf_='', mixedclass_=None, content_=None):
self.content_ = []
else:
self.content_ = content_
+
def factory(*args_, **kwargs_):
if docURLLink.subclass:
return docURLLink.subclass(*args_, **kwargs_)
@@ -5883,6 +6679,7 @@ def get_url(self): return self.url
def set_url(self, url): self.url = url
def getValueOf_(self): return self.valueOf_
def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
+
def export(self, outfile, level, namespace_='', name_='docURLLink', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
@@ -5890,36 +6687,44 @@ def export(self, outfile, level, namespace_='', name_='docURLLink', namespacedef
outfile.write('>')
self.exportChildren(outfile, level + 1, namespace_, name_)
outfile.write('</%s%s>\n' % (namespace_, name_))
+
def exportAttributes(self, outfile, level, namespace_='', name_='docURLLink'):
if self.url is not None:
- outfile.write(' url=%s' % (self.format_string(quote_attrib(self.url).encode(ExternalEncoding), input_name='url'), ))
+ outfile.write(' url=%s' % (self.format_string(quote_attrib(
+ self.url).encode(ExternalEncoding), input_name='url'), ))
+
def exportChildren(self, outfile, level, namespace_='', name_='docURLLink'):
- if self.valueOf_.find('![CDATA')>-1:
- value=quote_xml('%s' % self.valueOf_)
- value=value.replace('![CDATA','<![CDATA')
- value=value.replace(']]',']]>')
+ if self.valueOf_.find('![CDATA') > -1:
+ value = quote_xml('%s' % self.valueOf_)
+ value = value.replace('![CDATA', '<![CDATA')
+ value = value.replace(']]', ']]>')
outfile.write(value)
else:
outfile.write(quote_xml('%s' % self.valueOf_))
+
def hasContent_(self):
if (
self.valueOf_ is not None
- ):
+ ):
return True
else:
return False
+
def exportLiteral(self, outfile, level, name_='docURLLink'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
+
def exportLiteralAttributes(self, outfile, level, name_):
if self.url is not None:
showIndent(outfile, level)
outfile.write('url = %s,\n' % (self.url,))
+
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
+
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
@@ -5927,24 +6732,27 @@ def build(self, node_):
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
+
def buildAttributes(self, attrs):
if attrs.get('url'):
self.url = attrs.get('url').value
+
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.TEXT_NODE:
obj_ = self.mixedclass_(MixedContainer.CategoryText,
- MixedContainer.TypeNone, '', child_.nodeValue)
+ MixedContainer.TypeNone, '', child_.nodeValue)
self.content_.append(obj_)
if child_.nodeType == Node.TEXT_NODE:
self.valueOf_ += child_.nodeValue
elif child_.nodeType == Node.CDATA_SECTION_NODE:
- self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
+ self.valueOf_ += '![CDATA[' + child_.nodeValue + ']]'
# end class docURLLink
class docAnchorType(GeneratedsSuper):
subclass = None
superclass = None
+
def __init__(self, id=None, valueOf_='', mixedclass_=None, content_=None):
self.id = id
if mixedclass_ is None:
@@ -5955,6 +6763,7 @@ def __init__(self, id=None, valueOf_='', mixedclass_=None, content_=None):
self.content_ = []
else:
self.content_ = content_
+
def factory(*args_, **kwargs_):
if docAnchorType.subclass:
return docAnchorType.subclass(*args_, **kwargs_)
@@ -5965,43 +6774,53 @@ def get_id(self): return self.id
def set_id(self, id): self.id = id
def getValueOf_(self): return self.valueOf_
def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
+
def export(self, outfile, level, namespace_='', name_='docAnchorType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
- self.exportAttributes(outfile, level, namespace_, name_='docAnchorType')
+ self.exportAttributes(outfile, level, namespace_,
+ name_='docAnchorType')
outfile.write('>')
self.exportChildren(outfile, level + 1, namespace_, name_)
outfile.write('</%s%s>\n' % (namespace_, name_))
+
def exportAttributes(self, outfile, level, namespace_='', name_='docAnchorType'):
if self.id is not None:
- outfile.write(' id=%s' % (self.format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), ))
+ outfile.write(' id=%s' % (self.format_string(quote_attrib(
+ self.id).encode(ExternalEncoding), input_name='id'), ))
+
def exportChildren(self, outfile, level, namespace_='', name_='docAnchorType'):
- if self.valueOf_.find('![CDATA')>-1:
- value=quote_xml('%s' % self.valueOf_)
- value=value.replace('![CDATA','<![CDATA')
- value=value.replace(']]',']]>')
+ if self.valueOf_.find('![CDATA') > -1:
+ value = quote_xml('%s' % self.valueOf_)
+ value = value.replace('![CDATA', '<![CDATA')
+ value = value.replace(']]', ']]>')
outfile.write(value)
else:
outfile.write(quote_xml('%s' % self.valueOf_))
+
def hasContent_(self):
if (
self.valueOf_ is not None
- ):
+ ):
return True
else:
return False
+
def exportLiteral(self, outfile, level, name_='docAnchorType'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
+
def exportLiteralAttributes(self, outfile, level, name_):
if self.id is not None:
showIndent(outfile, level)
outfile.write('id = %s,\n' % (self.id,))
+
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
+
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
@@ -6009,24 +6828,27 @@ def build(self, node_):
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
+
def buildAttributes(self, attrs):
if attrs.get('id'):
self.id = attrs.get('id').value
+
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.TEXT_NODE:
obj_ = self.mixedclass_(MixedContainer.CategoryText,
- MixedContainer.TypeNone, '', child_.nodeValue)
+ MixedContainer.TypeNone, '', child_.nodeValue)
self.content_.append(obj_)
if child_.nodeType == Node.TEXT_NODE:
self.valueOf_ += child_.nodeValue
elif child_.nodeType == Node.CDATA_SECTION_NODE:
- self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
+ self.valueOf_ += '![CDATA[' + child_.nodeValue + ']]'
# end class docAnchorType
class docFormulaType(GeneratedsSuper):
subclass = None
superclass = None
+
def __init__(self, id=None, valueOf_='', mixedclass_=None, content_=None):
self.id = id
if mixedclass_ is None:
@@ -6037,6 +6859,7 @@ def __init__(self, id=None, valueOf_='', mixedclass_=None, content_=None):
self.content_ = []
else:
self.content_ = content_
+
def factory(*args_, **kwargs_):
if docFormulaType.subclass:
return docFormulaType.subclass(*args_, **kwargs_)
@@ -6047,43 +6870,53 @@ def get_id(self): return self.id
def set_id(self, id): self.id = id
def getValueOf_(self): return self.valueOf_
def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
+
def export(self, outfile, level, namespace_='', name_='docFormulaType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
- self.exportAttributes(outfile, level, namespace_, name_='docFormulaType')
+ self.exportAttributes(outfile, level, namespace_,
+ name_='docFormulaType')
outfile.write('>')
self.exportChildren(outfile, level + 1, namespace_, name_)
outfile.write('</%s%s>\n' % (namespace_, name_))
+
def exportAttributes(self, outfile, level, namespace_='', name_='docFormulaType'):
if self.id is not None:
- outfile.write(' id=%s' % (self.format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), ))
+ outfile.write(' id=%s' % (self.format_string(quote_attrib(
+ self.id).encode(ExternalEncoding), input_name='id'), ))
+
def exportChildren(self, outfile, level, namespace_='', name_='docFormulaType'):
- if self.valueOf_.find('![CDATA')>-1:
- value=quote_xml('%s' % self.valueOf_)
- value=value.replace('![CDATA','<![CDATA')
- value=value.replace(']]',']]>')
+ if self.valueOf_.find('![CDATA') > -1:
+ value = quote_xml('%s' % self.valueOf_)
+ value = value.replace('![CDATA', '<![CDATA')
+ value = value.replace(']]', ']]>')
outfile.write(value)
else:
outfile.write(quote_xml('%s' % self.valueOf_))
+
def hasContent_(self):
if (
self.valueOf_ is not None
- ):
+ ):
return True
else:
return False
+
def exportLiteral(self, outfile, level, name_='docFormulaType'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
+
def exportLiteralAttributes(self, outfile, level, name_):
if self.id is not None:
showIndent(outfile, level)
outfile.write('id = %s,\n' % (self.id,))
+
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
+
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
@@ -6091,27 +6924,31 @@ def build(self, node_):
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
+
def buildAttributes(self, attrs):
if attrs.get('id'):
self.id = attrs.get('id').value
+
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.TEXT_NODE:
obj_ = self.mixedclass_(MixedContainer.CategoryText,
- MixedContainer.TypeNone, '', child_.nodeValue)
+ MixedContainer.TypeNone, '', child_.nodeValue)
self.content_.append(obj_)
if child_.nodeType == Node.TEXT_NODE:
self.valueOf_ += child_.nodeValue
elif child_.nodeType == Node.CDATA_SECTION_NODE:
- self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
+ self.valueOf_ += '![CDATA[' + child_.nodeValue + ']]'
# end class docFormulaType
class docIndexEntryType(GeneratedsSuper):
subclass = None
superclass = None
+
def __init__(self, primaryie=None, secondaryie=None):
self.primaryie = primaryie
self.secondaryie = secondaryie
+
def factory(*args_, **kwargs_):
if docIndexEntryType.subclass:
return docIndexEntryType.subclass(*args_, **kwargs_)
@@ -6122,10 +6959,12 @@ def get_primaryie(self): return self.primaryie
def set_primaryie(self, primaryie): self.primaryie = primaryie
def get_secondaryie(self): return self.secondaryie
def set_secondaryie(self, secondaryie): self.secondaryie = secondaryie
+
def export(self, outfile, level, namespace_='', name_='docIndexEntryType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
- self.exportAttributes(outfile, level, namespace_, name_='docIndexEntryType')
+ self.exportAttributes(outfile, level, namespace_,
+ name_='docIndexEntryType')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
@@ -6133,52 +6972,65 @@ def export(self, outfile, level, namespace_='', name_='docIndexEntryType', names
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write(' />\n')
+
def exportAttributes(self, outfile, level, namespace_='', name_='docIndexEntryType'):
pass
+
def exportChildren(self, outfile, level, namespace_='', name_='docIndexEntryType'):
if self.primaryie is not None:
showIndent(outfile, level)
- outfile.write('<%sprimaryie>%s</%sprimaryie>\n' % (namespace_, self.format_string(quote_xml(self.primaryie).encode(ExternalEncoding), input_name='primaryie'), namespace_))
+ outfile.write('<%sprimaryie>%s</%sprimaryie>\n' % (namespace_, self.format_string(
+ quote_xml(self.primaryie).encode(ExternalEncoding), input_name='primaryie'), namespace_))
if self.secondaryie is not None:
showIndent(outfile, level)
- outfile.write('<%ssecondaryie>%s</%ssecondaryie>\n' % (namespace_, self.format_string(quote_xml(self.secondaryie).encode(ExternalEncoding), input_name='secondaryie'), namespace_))
+ outfile.write('<%ssecondaryie>%s</%ssecondaryie>\n' % (namespace_, self.format_string(
+ quote_xml(self.secondaryie).encode(ExternalEncoding), input_name='secondaryie'), namespace_))
+
def hasContent_(self):
if (
self.primaryie is not None or
self.secondaryie is not None
- ):
+ ):
return True
else:
return False
+
def exportLiteral(self, outfile, level, name_='docIndexEntryType'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
+
def exportLiteralAttributes(self, outfile, level, name_):
pass
+
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
- outfile.write('primaryie=%s,\n' % quote_python(self.primaryie).encode(ExternalEncoding))
+ outfile.write('primaryie=%s,\n' % quote_python(
+ self.primaryie).encode(ExternalEncoding))
showIndent(outfile, level)
- outfile.write('secondaryie=%s,\n' % quote_python(self.secondaryie).encode(ExternalEncoding))
+ outfile.write('secondaryie=%s,\n' % quote_python(
+ self.secondaryie).encode(ExternalEncoding))
+
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
+
def buildAttributes(self, attrs):
pass
+
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'primaryie':
+ nodeName_ == 'primaryie':
primaryie_ = ''
for text__content_ in child_.childNodes:
primaryie_ += text__content_.nodeValue
self.primaryie = primaryie_
elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'secondaryie':
+ nodeName_ == 'secondaryie':
secondaryie_ = ''
for text__content_ in child_.childNodes:
secondaryie_ += text__content_.nodeValue
@@ -6189,11 +7041,13 @@ def buildChildren(self, child_, nodeName_):
class docListType(GeneratedsSuper):
subclass = None
superclass = None
+
def __init__(self, listitem=None):
if listitem is None:
self.listitem = []
else:
self.listitem = listitem
+
def factory(*args_, **kwargs_):
if docListType.subclass:
return docListType.subclass(*args_, **kwargs_)
@@ -6204,6 +7058,7 @@ def get_listitem(self): return self.listitem
def set_listitem(self, listitem): self.listitem = listitem
def add_listitem(self, value): self.listitem.append(value)
def insert_listitem(self, index, value): self.listitem[index] = value
+
def export(self, outfile, level, namespace_='', name_='docListType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
@@ -6215,25 +7070,31 @@ def export(self, outfile, level, namespace_='', name_='docListType', namespacede
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write(' />\n')
+
def exportAttributes(self, outfile, level, namespace_='', name_='docListType'):
pass
+
def exportChildren(self, outfile, level, namespace_='', name_='docListType'):
for listitem_ in self.listitem:
listitem_.export(outfile, level, namespace_, name_='listitem')
+
def hasContent_(self):
if (
self.listitem is not None
- ):
+ ):
return True
else:
return False
+
def exportLiteral(self, outfile, level, name_='docListType'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
+
def exportLiteralAttributes(self, outfile, level, name_):
pass
+
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('listitem=[\n')
@@ -6247,17 +7108,20 @@ def exportLiteralChildren(self, outfile, level, name_):
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
+
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
+
def buildAttributes(self, attrs):
pass
+
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'listitem':
+ nodeName_ == 'listitem':
obj_ = docListItemType.factory()
obj_.build(child_)
self.listitem.append(obj_)
@@ -6267,11 +7131,13 @@ def buildChildren(self, child_, nodeName_):
class docListItemType(GeneratedsSuper):
subclass = None
superclass = None
+
def __init__(self, para=None):
if para is None:
self.para = []
else:
self.para = para
+
def factory(*args_, **kwargs_):
if docListItemType.subclass:
return docListItemType.subclass(*args_, **kwargs_)
@@ -6282,10 +7148,12 @@ def get_para(self): return self.para
def set_para(self, para): self.para = para
def add_para(self, value): self.para.append(value)
def insert_para(self, index, value): self.para[index] = value
+
def export(self, outfile, level, namespace_='', name_='docListItemType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
- self.exportAttributes(outfile, level, namespace_, name_='docListItemType')
+ self.exportAttributes(outfile, level, namespace_,
+ name_='docListItemType')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
@@ -6293,25 +7161,31 @@ def export(self, outfile, level, namespace_='', name_='docListItemType', namespa
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write(' />\n')
+
def exportAttributes(self, outfile, level, namespace_='', name_='docListItemType'):
pass
+
def exportChildren(self, outfile, level, namespace_='', name_='docListItemType'):
for para_ in self.para:
para_.export(outfile, level, namespace_, name_='para')
+
def hasContent_(self):
if (
self.para is not None
- ):
+ ):
return True
else:
return False
+
def exportLiteral(self, outfile, level, name_='docListItemType'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
+
def exportLiteralAttributes(self, outfile, level, name_):
pass
+
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('para=[\n')
@@ -6325,17 +7199,20 @@ def exportLiteralChildren(self, outfile, level, name_):
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
+
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
+
def buildAttributes(self, attrs):
pass
+
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'para':
+ nodeName_ == 'para':
obj_ = docParaType.factory()
obj_.build(child_)
self.para.append(obj_)
@@ -6345,6 +7222,7 @@ def buildChildren(self, child_, nodeName_):
class docSimpleSectType(GeneratedsSuper):
subclass = None
superclass = None
+
def __init__(self, kind=None, title=None, para=None):
self.kind = kind
self.title = title
@@ -6352,6 +7230,7 @@ def __init__(self, kind=None, title=None, para=None):
self.para = []
else:
self.para = para
+
def factory(*args_, **kwargs_):
if docSimpleSectType.subclass:
return docSimpleSectType.subclass(*args_, **kwargs_)
@@ -6366,10 +7245,12 @@ def add_para(self, value): self.para.append(value)
def insert_para(self, index, value): self.para[index] = value
def get_kind(self): return self.kind
def set_kind(self, kind): self.kind = kind
+
def export(self, outfile, level, namespace_='', name_='docSimpleSectType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
- self.exportAttributes(outfile, level, namespace_, name_='docSimpleSectType')
+ self.exportAttributes(outfile, level, namespace_,
+ name_='docSimpleSectType')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
@@ -6377,31 +7258,37 @@ def export(self, outfile, level, namespace_='', name_='docSimpleSectType', names
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write(' />\n')
+
def exportAttributes(self, outfile, level, namespace_='', name_='docSimpleSectType'):
if self.kind is not None:
outfile.write(' kind=%s' % (quote_attrib(self.kind), ))
+
def exportChildren(self, outfile, level, namespace_='', name_='docSimpleSectType'):
if self.title:
self.title.export(outfile, level, namespace_, name_='title')
for para_ in self.para:
para_.export(outfile, level, namespace_, name_='para')
+
def hasContent_(self):
if (
self.title is not None or
self.para is not None
- ):
+ ):
return True
else:
return False
+
def exportLiteral(self, outfile, level, name_='docSimpleSectType'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
+
def exportLiteralAttributes(self, outfile, level, name_):
if self.kind is not None:
showIndent(outfile, level)
outfile.write('kind = "%s",\n' % (self.kind,))
+
def exportLiteralChildren(self, outfile, level, name_):
if self.title:
showIndent(outfile, level)
@@ -6421,23 +7308,26 @@ def exportLiteralChildren(self, outfile, level, name_):
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
+
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
+
def buildAttributes(self, attrs):
if attrs.get('kind'):
self.kind = attrs.get('kind').value
+
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'title':
+ nodeName_ == 'title':
obj_ = docTitleType.factory()
obj_.build(child_)
self.set_title(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'para':
+ nodeName_ == 'para':
obj_ = docParaType.factory()
obj_.build(child_)
self.para.append(obj_)
@@ -6447,8 +7337,10 @@ def buildChildren(self, child_, nodeName_):
class docVarListEntryType(GeneratedsSuper):
subclass = None
superclass = None
+
def __init__(self, term=None):
self.term = term
+
def factory(*args_, **kwargs_):
if docVarListEntryType.subclass:
return docVarListEntryType.subclass(*args_, **kwargs_)
@@ -6457,10 +7349,12 @@ def factory(*args_, **kwargs_):
factory = staticmethod(factory)
def get_term(self): return self.term
def set_term(self, term): self.term = term
+
def export(self, outfile, level, namespace_='', name_='docVarListEntryType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
- self.exportAttributes(outfile, level, namespace_, name_='docVarListEntryType')
+ self.exportAttributes(outfile, level, namespace_,
+ name_='docVarListEntryType')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
@@ -6468,25 +7362,31 @@ def export(self, outfile, level, namespace_='', name_='docVarListEntryType', nam
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write(' />\n')
+
def exportAttributes(self, outfile, level, namespace_='', name_='docVarListEntryType'):
pass
+
def exportChildren(self, outfile, level, namespace_='', name_='docVarListEntryType'):
if self.term:
self.term.export(outfile, level, namespace_, name_='term', )
+
def hasContent_(self):
if (
self.term is not None
- ):
+ ):
return True
else:
return False
+
def exportLiteral(self, outfile, level, name_='docVarListEntryType'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
+
def exportLiteralAttributes(self, outfile, level, name_):
pass
+
def exportLiteralChildren(self, outfile, level, name_):
if self.term:
showIndent(outfile, level)
@@ -6494,17 +7394,20 @@ def exportLiteralChildren(self, outfile, level, name_):
self.term.exportLiteral(outfile, level, name_='term')
showIndent(outfile, level)
outfile.write('),\n')
+
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
+
def buildAttributes(self, attrs):
pass
+
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'term':
+ nodeName_ == 'term':
obj_ = docTitleType.factory()
obj_.build(child_)
self.set_term(obj_)
@@ -6514,8 +7417,10 @@ def buildChildren(self, child_, nodeName_):
class docVariableListType(GeneratedsSuper):
subclass = None
superclass = None
+
def __init__(self, valueOf_=''):
self.valueOf_ = valueOf_
+
def factory(*args_, **kwargs_):
if docVariableListType.subclass:
return docVariableListType.subclass(*args_, **kwargs_)
@@ -6524,10 +7429,12 @@ def factory(*args_, **kwargs_):
factory = staticmethod(factory)
def getValueOf_(self): return self.valueOf_
def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
+
def export(self, outfile, level, namespace_='', name_='docVariableListType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
- self.exportAttributes(outfile, level, namespace_, name_='docVariableListType')
+ self.exportAttributes(outfile, level, namespace_,
+ name_='docVariableListType')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
@@ -6535,33 +7442,40 @@ def export(self, outfile, level, namespace_='', name_='docVariableListType', nam
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write(' />\n')
+
def exportAttributes(self, outfile, level, namespace_='', name_='docVariableListType'):
pass
+
def exportChildren(self, outfile, level, namespace_='', name_='docVariableListType'):
- if self.valueOf_.find('![CDATA')>-1:
- value=quote_xml('%s' % self.valueOf_)
- value=value.replace('![CDATA','')
+ if self.valueOf_.find('![CDATA') > -1:
+ value = quote_xml('%s' % self.valueOf_)
+ value = value.replace('![CDATA', '')
outfile.write(value)
else:
outfile.write(quote_xml('%s' % self.valueOf_))
+
def hasContent_(self):
if (
self.valueOf_ is not None
- ):
+ ):
return True
else:
return False
+
def exportLiteral(self, outfile, level, name_='docVariableListType'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
+
def exportLiteralAttributes(self, outfile, level, name_):
pass
+
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
+
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
@@ -6569,19 +7483,22 @@ def build(self, node_):
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
+
def buildAttributes(self, attrs):
pass
+
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.TEXT_NODE:
self.valueOf_ += child_.nodeValue
elif child_.nodeType == Node.CDATA_SECTION_NODE:
- self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
+ self.valueOf_ += '![CDATA[' + child_.nodeValue + ']]'
# end class docVariableListType
class docRefTextType(GeneratedsSuper):
subclass = None
superclass = None
+
def __init__(self, refid=None, kindref=None, external=None, valueOf_='', mixedclass_=None, content_=None):
self.refid = refid
self.kindref = kindref
@@ -6594,6 +7511,7 @@ def __init__(self, refid=None, kindref=None, external=None, valueOf_='', mixedcl
self.content_ = []
else:
self.content_ = content_
+
def factory(*args_, **kwargs_):
if docRefTextType.subclass:
return docRefTextType.subclass(*args_, **kwargs_)
@@ -6608,40 +7526,49 @@ def get_external(self): return self.external
def set_external(self, external): self.external = external
def getValueOf_(self): return self.valueOf_
def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
+
def export(self, outfile, level, namespace_='', name_='docRefTextType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
- self.exportAttributes(outfile, level, namespace_, name_='docRefTextType')
+ self.exportAttributes(outfile, level, namespace_,
+ name_='docRefTextType')
outfile.write('>')
self.exportChildren(outfile, level + 1, namespace_, name_)
outfile.write('</%s%s>\n' % (namespace_, name_))
+
def exportAttributes(self, outfile, level, namespace_='', name_='docRefTextType'):
if self.refid is not None:
- outfile.write(' refid=%s' % (self.format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), ))
+ outfile.write(' refid=%s' % (self.format_string(quote_attrib(
+ self.refid).encode(ExternalEncoding), input_name='refid'), ))
if self.kindref is not None:
outfile.write(' kindref=%s' % (quote_attrib(self.kindref), ))
if self.external is not None:
- outfile.write(' external=%s' % (self.format_string(quote_attrib(self.external).encode(ExternalEncoding), input_name='external'), ))
+ outfile.write(' external=%s' % (self.format_string(quote_attrib(
+ self.external).encode(ExternalEncoding), input_name='external'), ))
+
def exportChildren(self, outfile, level, namespace_='', name_='docRefTextType'):
- if self.valueOf_.find('![CDATA')>-1:
- value=quote_xml('%s' % self.valueOf_)
- value=value.replace('![CDATA','')
+ if self.valueOf_.find('![CDATA') > -1:
+ value = quote_xml('%s' % self.valueOf_)
+ value = value.replace('![CDATA', '')
outfile.write(value)
else:
outfile.write(quote_xml('%s' % self.valueOf_))
+
def hasContent_(self):
if (
self.valueOf_ is not None
- ):
+ ):
return True
else:
return False
+
def exportLiteral(self, outfile, level, name_='docRefTextType'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
+
def exportLiteralAttributes(self, outfile, level, name_):
if self.refid is not None:
showIndent(outfile, level)
@@ -6652,9 +7579,11 @@ def exportLiteralAttributes(self, outfile, level, name_):
if self.external is not None:
showIndent(outfile, level)
outfile.write('external = %s,\n' % (self.external,))
+
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
+
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
@@ -6662,6 +7591,7 @@ def build(self, node_):
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
+
def buildAttributes(self, attrs):
if attrs.get('refid'):
self.refid = attrs.get('refid').value
@@ -6669,21 +7599,23 @@ def buildAttributes(self, attrs):
self.kindref = attrs.get('kindref').value
if attrs.get('external'):
self.external = attrs.get('external').value
+
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.TEXT_NODE:
obj_ = self.mixedclass_(MixedContainer.CategoryText,
- MixedContainer.TypeNone, '', child_.nodeValue)
+ MixedContainer.TypeNone, '', child_.nodeValue)
self.content_.append(obj_)
if child_.nodeType == Node.TEXT_NODE:
self.valueOf_ += child_.nodeValue
elif child_.nodeType == Node.CDATA_SECTION_NODE:
- self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
+ self.valueOf_ += '![CDATA[' + child_.nodeValue + ']]'
# end class docRefTextType
class docTableType(GeneratedsSuper):
subclass = None
superclass = None
+
def __init__(self, rows=None, cols=None, row=None, caption=None):
self.rows = rows
self.cols = cols
@@ -6692,6 +7624,7 @@ def __init__(self, rows=None, cols=None, row=None, caption=None):
else:
self.row = row
self.caption = caption
+
def factory(*args_, **kwargs_):
if docTableType.subclass:
return docTableType.subclass(*args_, **kwargs_)
@@ -6708,6 +7641,7 @@ def get_rows(self): return self.rows
def set_rows(self, rows): self.rows = rows
def get_cols(self): return self.cols
def set_cols(self, cols): self.cols = cols
+
def export(self, outfile, level, namespace_='', name_='docTableType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
@@ -6719,29 +7653,36 @@ def export(self, outfile, level, namespace_='', name_='docTableType', namespaced
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write(' />\n')
+
def exportAttributes(self, outfile, level, namespace_='', name_='docTableType'):
if self.rows is not None:
- outfile.write(' rows="%s"' % self.format_integer(self.rows, input_name='rows'))
+ outfile.write(' rows="%s"' % self.format_integer(
+ self.rows, input_name='rows'))
if self.cols is not None:
- outfile.write(' cols="%s"' % self.format_integer(self.cols, input_name='cols'))
+ outfile.write(' cols="%s"' % self.format_integer(
+ self.cols, input_name='cols'))
+
def exportChildren(self, outfile, level, namespace_='', name_='docTableType'):
for row_ in self.row:
row_.export(outfile, level, namespace_, name_='row')
if self.caption:
self.caption.export(outfile, level, namespace_, name_='caption')
+
def hasContent_(self):
if (
self.row is not None or
self.caption is not None
- ):
+ ):
return True
else:
return False
+
def exportLiteral(self, outfile, level, name_='docTableType'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
+
def exportLiteralAttributes(self, outfile, level, name_):
if self.rows is not None:
showIndent(outfile, level)
@@ -6749,6 +7690,7 @@ def exportLiteralAttributes(self, outfile, level, name_):
if self.cols is not None:
showIndent(outfile, level)
outfile.write('cols = %s,\n' % (self.cols,))
+
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('row=[\n')
@@ -6768,31 +7710,34 @@ def exportLiteralChildren(self, outfile, level, name_):
self.caption.exportLiteral(outfile, level, name_='caption')
showIndent(outfile, level)
outfile.write('),\n')
+
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
+
def buildAttributes(self, attrs):
if attrs.get('rows'):
try:
self.rows = int(attrs.get('rows').value)
- except ValueError, exp:
+ except ValueError as exp:
raise ValueError('Bad integer attribute (rows): %s' % exp)
if attrs.get('cols'):
try:
self.cols = int(attrs.get('cols').value)
- except ValueError, exp:
+ except ValueError as exp:
raise ValueError('Bad integer attribute (cols): %s' % exp)
+
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'row':
+ nodeName_ == 'row':
obj_ = docRowType.factory()
obj_.build(child_)
self.row.append(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'caption':
+ nodeName_ == 'caption':
obj_ = docCaptionType.factory()
obj_.build(child_)
self.set_caption(obj_)
@@ -6802,11 +7747,13 @@ def buildChildren(self, child_, nodeName_):
class docRowType(GeneratedsSuper):
subclass = None
superclass = None
+
def __init__(self, entry=None):
if entry is None:
self.entry = []
else:
self.entry = entry
+
def factory(*args_, **kwargs_):
if docRowType.subclass:
return docRowType.subclass(*args_, **kwargs_)
@@ -6817,6 +7764,7 @@ def get_entry(self): return self.entry
def set_entry(self, entry): self.entry = entry
def add_entry(self, value): self.entry.append(value)
def insert_entry(self, index, value): self.entry[index] = value
+
def export(self, outfile, level, namespace_='', name_='docRowType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
@@ -6828,25 +7776,31 @@ def export(self, outfile, level, namespace_='', name_='docRowType', namespacedef
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write(' />\n')
+
def exportAttributes(self, outfile, level, namespace_='', name_='docRowType'):
pass
+
def exportChildren(self, outfile, level, namespace_='', name_='docRowType'):
for entry_ in self.entry:
entry_.export(outfile, level, namespace_, name_='entry')
+
def hasContent_(self):
if (
self.entry is not None
- ):
+ ):
return True
else:
return False
+
def exportLiteral(self, outfile, level, name_='docRowType'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
+
def exportLiteralAttributes(self, outfile, level, name_):
pass
+
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('entry=[\n')
@@ -6860,17 +7814,20 @@ def exportLiteralChildren(self, outfile, level, name_):
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
+
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
+
def buildAttributes(self, attrs):
pass
+
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'entry':
+ nodeName_ == 'entry':
obj_ = docEntryType.factory()
obj_.build(child_)
self.entry.append(obj_)
@@ -6880,12 +7837,14 @@ def buildChildren(self, child_, nodeName_):
class docEntryType(GeneratedsSuper):
subclass = None
superclass = None
+
def __init__(self, thead=None, para=None):
self.thead = thead
if para is None:
self.para = []
else:
self.para = para
+
def factory(*args_, **kwargs_):
if docEntryType.subclass:
return docEntryType.subclass(*args_, **kwargs_)
@@ -6898,6 +7857,7 @@ def add_para(self, value): self.para.append(value)
def insert_para(self, index, value): self.para[index] = value
def get_thead(self): return self.thead
def set_thead(self, thead): self.thead = thead
+
def export(self, outfile, level, namespace_='', name_='docEntryType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
@@ -6909,28 +7869,34 @@ def export(self, outfile, level, namespace_='', name_='docEntryType', namespaced
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write(' />\n')
+
def exportAttributes(self, outfile, level, namespace_='', name_='docEntryType'):
if self.thead is not None:
outfile.write(' thead=%s' % (quote_attrib(self.thead), ))
+
def exportChildren(self, outfile, level, namespace_='', name_='docEntryType'):
for para_ in self.para:
para_.export(outfile, level, namespace_, name_='para')
+
def hasContent_(self):
if (
self.para is not None
- ):
+ ):
return True
else:
return False
+
def exportLiteral(self, outfile, level, name_='docEntryType'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
+
def exportLiteralAttributes(self, outfile, level, name_):
if self.thead is not None:
showIndent(outfile, level)
outfile.write('thead = "%s",\n' % (self.thead,))
+
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('para=[\n')
@@ -6944,18 +7910,21 @@ def exportLiteralChildren(self, outfile, level, name_):
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
+
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
+
def buildAttributes(self, attrs):
if attrs.get('thead'):
self.thead = attrs.get('thead').value
+
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'para':
+ nodeName_ == 'para':
obj_ = docParaType.factory()
obj_.build(child_)
self.para.append(obj_)
@@ -6965,6 +7934,7 @@ def buildChildren(self, child_, nodeName_):
class docCaptionType(GeneratedsSuper):
subclass = None
superclass = None
+
def __init__(self, valueOf_='', mixedclass_=None, content_=None):
if mixedclass_ is None:
self.mixedclass_ = MixedContainer
@@ -6974,6 +7944,7 @@ def __init__(self, valueOf_='', mixedclass_=None, content_=None):
self.content_ = []
else:
self.content_ = content_
+
def factory(*args_, **kwargs_):
if docCaptionType.subclass:
return docCaptionType.subclass(*args_, **kwargs_)
@@ -6982,40 +7953,49 @@ def factory(*args_, **kwargs_):
factory = staticmethod(factory)
def getValueOf_(self): return self.valueOf_
def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
+
def export(self, outfile, level, namespace_='', name_='docCaptionType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
- self.exportAttributes(outfile, level, namespace_, name_='docCaptionType')
+ self.exportAttributes(outfile, level, namespace_,
+ name_='docCaptionType')
outfile.write('>')
self.exportChildren(outfile, level + 1, namespace_, name_)
outfile.write('</%s%s>\n' % (namespace_, name_))
+
def exportAttributes(self, outfile, level, namespace_='', name_='docCaptionType'):
pass
+
def exportChildren(self, outfile, level, namespace_='', name_='docCaptionType'):
- if self.valueOf_.find('![CDATA')>-1:
- value=quote_xml('%s' % self.valueOf_)
- value=value.replace('![CDATA','')
+ if self.valueOf_.find('![CDATA') > -1:
+ value = quote_xml('%s' % self.valueOf_)
+ value = value.replace('![CDATA', '')
outfile.write(value)
else:
outfile.write(quote_xml('%s' % self.valueOf_))
+
def hasContent_(self):
if (
self.valueOf_ is not None
- ):
+ ):
return True
else:
return False
+
def exportLiteral(self, outfile, level, name_='docCaptionType'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
+
def exportLiteralAttributes(self, outfile, level, name_):
pass
+
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
+
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
@@ -7023,23 +8003,26 @@ def build(self, node_):
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
+
def buildAttributes(self, attrs):
pass
+
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.TEXT_NODE:
obj_ = self.mixedclass_(MixedContainer.CategoryText,
- MixedContainer.TypeNone, '', child_.nodeValue)
+ MixedContainer.TypeNone, '', child_.nodeValue)
self.content_.append(obj_)
if child_.nodeType == Node.TEXT_NODE:
self.valueOf_ += child_.nodeValue
elif child_.nodeType == Node.CDATA_SECTION_NODE:
- self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
+ self.valueOf_ += '![CDATA[' + child_.nodeValue + ']]'
# end class docCaptionType
class docHeadingType(GeneratedsSuper):
subclass = None
superclass = None
+
def __init__(self, level=None, valueOf_='', mixedclass_=None, content_=None):
self.level = level
if mixedclass_ is None:
@@ -7050,6 +8033,7 @@ def __init__(self, level=None, valueOf_='', mixedclass_=None, content_=None):
self.content_ = []
else:
self.content_ = content_
+
def factory(*args_, **kwargs_):
if docHeadingType.subclass:
return docHeadingType.subclass(*args_, **kwargs_)
@@ -7060,43 +8044,53 @@ def get_level(self): return self.level
def set_level(self, level): self.level = level
def getValueOf_(self): return self.valueOf_
def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
+
def export(self, outfile, level, namespace_='', name_='docHeadingType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
- self.exportAttributes(outfile, level, namespace_, name_='docHeadingType')
+ self.exportAttributes(outfile, level, namespace_,
+ name_='docHeadingType')
outfile.write('>')
self.exportChildren(outfile, level + 1, namespace_, name_)
outfile.write('</%s%s>\n' % (namespace_, name_))
+
def exportAttributes(self, outfile, level, namespace_='', name_='docHeadingType'):
if self.level is not None:
- outfile.write(' level="%s"' % self.format_integer(self.level, input_name='level'))
+ outfile.write(' level="%s"' % self.format_integer(
+ self.level, input_name='level'))
+
def exportChildren(self, outfile, level, namespace_='', name_='docHeadingType'):
- if self.valueOf_.find('![CDATA')>-1:
- value=quote_xml('%s' % self.valueOf_)
- value=value.replace('![CDATA','')
+ if self.valueOf_.find('![CDATA') > -1:
+ value = quote_xml('%s' % self.valueOf_)
+ value = value.replace('![CDATA', '')
outfile.write(value)
else:
outfile.write(quote_xml('%s' % self.valueOf_))
+
def hasContent_(self):
if (
self.valueOf_ is not None
- ):
+ ):
return True
else:
return False
+
def exportLiteral(self, outfile, level, name_='docHeadingType'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
+
def exportLiteralAttributes(self, outfile, level, name_):
if self.level is not None:
showIndent(outfile, level)
outfile.write('level = %s,\n' % (self.level,))
+
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
+
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
@@ -7104,27 +8098,30 @@ def build(self, node_):
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
+
def buildAttributes(self, attrs):
if attrs.get('level'):
try:
self.level = int(attrs.get('level').value)
- except ValueError, exp:
+ except ValueError as exp:
raise ValueError('Bad integer attribute (level): %s' % exp)
+
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.TEXT_NODE:
obj_ = self.mixedclass_(MixedContainer.CategoryText,
- MixedContainer.TypeNone, '', child_.nodeValue)
+ MixedContainer.TypeNone, '', child_.nodeValue)
self.content_.append(obj_)
if child_.nodeType == Node.TEXT_NODE:
self.valueOf_ += child_.nodeValue
elif child_.nodeType == Node.CDATA_SECTION_NODE:
- self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
+ self.valueOf_ += '![CDATA[' + child_.nodeValue + ']]'
# end class docHeadingType
class docImageType(GeneratedsSuper):
subclass = None
superclass = None
+
def __init__(self, width=None, type_=None, name=None, height=None, valueOf_='', mixedclass_=None, content_=None):
self.width = width
self.type_ = type_
@@ -7138,6 +8135,7 @@ def __init__(self, width=None, type_=None, name=None, height=None, valueOf_='',
self.content_ = []
else:
self.content_ = content_
+
def factory(*args_, **kwargs_):
if docImageType.subclass:
return docImageType.subclass(*args_, **kwargs_)
@@ -7154,6 +8152,7 @@ def get_height(self): return self.height
def set_height(self, height): self.height = height
def getValueOf_(self): return self.valueOf_
def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
+
def export(self, outfile, level, namespace_='', name_='docImageType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
@@ -7161,35 +8160,43 @@ def export(self, outfile, level, namespace_='', name_='docImageType', namespaced
outfile.write('>')
self.exportChildren(outfile, level + 1, namespace_, name_)
outfile.write('</%s%s>\n' % (namespace_, name_))
+
def exportAttributes(self, outfile, level, namespace_='', name_='docImageType'):
if self.width is not None:
- outfile.write(' width=%s' % (self.format_string(quote_attrib(self.width).encode(ExternalEncoding), input_name='width'), ))
+ outfile.write(' width=%s' % (self.format_string(quote_attrib(
+ self.width).encode(ExternalEncoding), input_name='width'), ))
if self.type_ is not None:
outfile.write(' type=%s' % (quote_attrib(self.type_), ))
if self.name is not None:
- outfile.write(' name=%s' % (self.format_string(quote_attrib(self.name).encode(ExternalEncoding), input_name='name'), ))
+ outfile.write(' name=%s' % (self.format_string(quote_attrib(
+ self.name).encode(ExternalEncoding), input_name='name'), ))
if self.height is not None:
- outfile.write(' height=%s' % (self.format_string(quote_attrib(self.height).encode(ExternalEncoding), input_name='height'), ))
+ outfile.write(' height=%s' % (self.format_string(quote_attrib(
+ self.height).encode(ExternalEncoding), input_name='height'), ))
+
def exportChildren(self, outfile, level, namespace_='', name_='docImageType'):
- if self.valueOf_.find('![CDATA')>-1:
- value=quote_xml('%s' % self.valueOf_)
- value=value.replace('![CDATA','')
+ if self.valueOf_.find('![CDATA') > -1:
+ value = quote_xml('%s' % self.valueOf_)
+ value = value.replace('![CDATA', '')
outfile.write(value)
else:
outfile.write(quote_xml('%s' % self.valueOf_))
+
def hasContent_(self):
if (
self.valueOf_ is not None
- ):
+ ):
return True
else:
return False
+
def exportLiteral(self, outfile, level, name_='docImageType'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
+
def exportLiteralAttributes(self, outfile, level, name_):
if self.width is not None:
showIndent(outfile, level)
@@ -7203,9 +8210,11 @@ def exportLiteralAttributes(self, outfile, level, name_):
if self.height is not None:
showIndent(outfile, level)
outfile.write('height = %s,\n' % (self.height,))
+
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
+
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
@@ -7213,6 +8222,7 @@ def build(self, node_):
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
+
def buildAttributes(self, attrs):
if attrs.get('width'):
self.width = attrs.get('width').value
@@ -7222,21 +8232,23 @@ def buildAttributes(self, attrs):
self.name = attrs.get('name').value
if attrs.get('height'):
self.height = attrs.get('height').value
+
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.TEXT_NODE:
obj_ = self.mixedclass_(MixedContainer.CategoryText,
- MixedContainer.TypeNone, '', child_.nodeValue)
+ MixedContainer.TypeNone, '', child_.nodeValue)
self.content_.append(obj_)
if child_.nodeType == Node.TEXT_NODE:
self.valueOf_ += child_.nodeValue
elif child_.nodeType == Node.CDATA_SECTION_NODE:
- self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
+ self.valueOf_ += '![CDATA[' + child_.nodeValue + ']]'
# end class docImageType
class docDotFileType(GeneratedsSuper):
subclass = None
superclass = None
+
def __init__(self, name=None, valueOf_='', mixedclass_=None, content_=None):
self.name = name
if mixedclass_ is None:
@@ -7247,6 +8259,7 @@ def __init__(self, name=None, valueOf_='', mixedclass_=None, content_=None):
self.content_ = []
else:
self.content_ = content_
+
def factory(*args_, **kwargs_):
if docDotFileType.subclass:
return docDotFileType.subclass(*args_, **kwargs_)
@@ -7257,43 +8270,53 @@ def get_name(self): return self.name
def set_name(self, name): self.name = name
def getValueOf_(self): return self.valueOf_
def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
+
def export(self, outfile, level, namespace_='', name_='docDotFileType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
- self.exportAttributes(outfile, level, namespace_, name_='docDotFileType')
+ self.exportAttributes(outfile, level, namespace_,
+ name_='docDotFileType')
outfile.write('>')
self.exportChildren(outfile, level + 1, namespace_, name_)
outfile.write('</%s%s>\n' % (namespace_, name_))
+
def exportAttributes(self, outfile, level, namespace_='', name_='docDotFileType'):
if self.name is not None:
- outfile.write(' name=%s' % (self.format_string(quote_attrib(self.name).encode(ExternalEncoding), input_name='name'), ))
+ outfile.write(' name=%s' % (self.format_string(quote_attrib(
+ self.name).encode(ExternalEncoding), input_name='name'), ))
+
def exportChildren(self, outfile, level, namespace_='', name_='docDotFileType'):
- if self.valueOf_.find('![CDATA')>-1:
- value=quote_xml('%s' % self.valueOf_)
- value=value.replace('![CDATA','')
+ if self.valueOf_.find('![CDATA') > -1:
+ value = quote_xml('%s' % self.valueOf_)
+ value = value.replace('![CDATA', '')
outfile.write(value)
else:
outfile.write(quote_xml('%s' % self.valueOf_))
+
def hasContent_(self):
if (
self.valueOf_ is not None
- ):
+ ):
return True
else:
return False
+
def exportLiteral(self, outfile, level, name_='docDotFileType'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
+
def exportLiteralAttributes(self, outfile, level, name_):
if self.name is not None:
showIndent(outfile, level)
outfile.write('name = %s,\n' % (self.name,))
+
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
+
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
@@ -7301,24 +8324,27 @@ def build(self, node_):
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
+
def buildAttributes(self, attrs):
if attrs.get('name'):
self.name = attrs.get('name').value
+
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.TEXT_NODE:
obj_ = self.mixedclass_(MixedContainer.CategoryText,
- MixedContainer.TypeNone, '', child_.nodeValue)
+ MixedContainer.TypeNone, '', child_.nodeValue)
self.content_.append(obj_)
if child_.nodeType == Node.TEXT_NODE:
self.valueOf_ += child_.nodeValue
elif child_.nodeType == Node.CDATA_SECTION_NODE:
- self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
+ self.valueOf_ += '![CDATA[' + child_.nodeValue + ']]'
# end class docDotFileType
class docTocItemType(GeneratedsSuper):
subclass = None
superclass = None
+
def __init__(self, id=None, valueOf_='', mixedclass_=None, content_=None):
self.id = id
if mixedclass_ is None:
@@ -7329,6 +8355,7 @@ def __init__(self, id=None, valueOf_='', mixedclass_=None, content_=None):
self.content_ = []
else:
self.content_ = content_
+
def factory(*args_, **kwargs_):
if docTocItemType.subclass:
return docTocItemType.subclass(*args_, **kwargs_)
@@ -7339,43 +8366,53 @@ def get_id(self): return self.id
def set_id(self, id): self.id = id
def getValueOf_(self): return self.valueOf_
def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
+
def export(self, outfile, level, namespace_='', name_='docTocItemType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
- self.exportAttributes(outfile, level, namespace_, name_='docTocItemType')
+ self.exportAttributes(outfile, level, namespace_,
+ name_='docTocItemType')
outfile.write('>')
self.exportChildren(outfile, level + 1, namespace_, name_)
outfile.write('</%s%s>\n' % (namespace_, name_))
+
def exportAttributes(self, outfile, level, namespace_='', name_='docTocItemType'):
if self.id is not None:
- outfile.write(' id=%s' % (self.format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), ))
+ outfile.write(' id=%s' % (self.format_string(quote_attrib(
+ self.id).encode(ExternalEncoding), input_name='id'), ))
+
def exportChildren(self, outfile, level, namespace_='', name_='docTocItemType'):
- if self.valueOf_.find('![CDATA')>-1:
- value=quote_xml('%s' % self.valueOf_)
- value=value.replace('![CDATA','')
+ if self.valueOf_.find('![CDATA') > -1:
+ value = quote_xml('%s' % self.valueOf_)
+ value = value.replace('![CDATA', '')
outfile.write(value)
else:
outfile.write(quote_xml('%s' % self.valueOf_))
+
def hasContent_(self):
if (
self.valueOf_ is not None
- ):
+ ):
return True
else:
return False
+
def exportLiteral(self, outfile, level, name_='docTocItemType'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
+
def exportLiteralAttributes(self, outfile, level, name_):
if self.id is not None:
showIndent(outfile, level)
outfile.write('id = %s,\n' % (self.id,))
+
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
+
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
@@ -7383,29 +8420,33 @@ def build(self, node_):
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
+
def buildAttributes(self, attrs):
if attrs.get('id'):
self.id = attrs.get('id').value
+
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.TEXT_NODE:
obj_ = self.mixedclass_(MixedContainer.CategoryText,
- MixedContainer.TypeNone, '', child_.nodeValue)
+ MixedContainer.TypeNone, '', child_.nodeValue)
self.content_.append(obj_)
if child_.nodeType == Node.TEXT_NODE:
self.valueOf_ += child_.nodeValue
elif child_.nodeType == Node.CDATA_SECTION_NODE:
- self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
+ self.valueOf_ += '![CDATA[' + child_.nodeValue + ']]'
# end class docTocItemType
class docTocListType(GeneratedsSuper):
subclass = None
superclass = None
+
def __init__(self, tocitem=None):
if tocitem is None:
self.tocitem = []
else:
self.tocitem = tocitem
+
def factory(*args_, **kwargs_):
if docTocListType.subclass:
return docTocListType.subclass(*args_, **kwargs_)
@@ -7416,10 +8457,12 @@ def get_tocitem(self): return self.tocitem
def set_tocitem(self, tocitem): self.tocitem = tocitem
def add_tocitem(self, value): self.tocitem.append(value)
def insert_tocitem(self, index, value): self.tocitem[index] = value
+
def export(self, outfile, level, namespace_='', name_='docTocListType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
- self.exportAttributes(outfile, level, namespace_, name_='docTocListType')
+ self.exportAttributes(outfile, level, namespace_,
+ name_='docTocListType')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
@@ -7427,25 +8470,31 @@ def export(self, outfile, level, namespace_='', name_='docTocListType', namespac
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write(' />\n')
+
def exportAttributes(self, outfile, level, namespace_='', name_='docTocListType'):
pass
+
def exportChildren(self, outfile, level, namespace_='', name_='docTocListType'):
for tocitem_ in self.tocitem:
tocitem_.export(outfile, level, namespace_, name_='tocitem')
+
def hasContent_(self):
if (
self.tocitem is not None
- ):
+ ):
return True
else:
return False
+
def exportLiteral(self, outfile, level, name_='docTocListType'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
+
def exportLiteralAttributes(self, outfile, level, name_):
pass
+
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('tocitem=[\n')
@@ -7459,17 +8508,20 @@ def exportLiteralChildren(self, outfile, level, name_):
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
+
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
+
def buildAttributes(self, attrs):
pass
+
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'tocitem':
+ nodeName_ == 'tocitem':
obj_ = docTocItemType.factory()
obj_.build(child_)
self.tocitem.append(obj_)
@@ -7479,12 +8531,14 @@ def buildChildren(self, child_, nodeName_):
class docLanguageType(GeneratedsSuper):
subclass = None
superclass = None
+
def __init__(self, langid=None, para=None):
self.langid = langid
if para is None:
self.para = []
else:
self.para = para
+
def factory(*args_, **kwargs_):
if docLanguageType.subclass:
return docLanguageType.subclass(*args_, **kwargs_)
@@ -7497,10 +8551,12 @@ def add_para(self, value): self.para.append(value)
def insert_para(self, index, value): self.para[index] = value
def get_langid(self): return self.langid
def set_langid(self, langid): self.langid = langid
+
def export(self, outfile, level, namespace_='', name_='docLanguageType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
- self.exportAttributes(outfile, level, namespace_, name_='docLanguageType')
+ self.exportAttributes(outfile, level, namespace_,
+ name_='docLanguageType')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
@@ -7508,28 +8564,35 @@ def export(self, outfile, level, namespace_='', name_='docLanguageType', namespa
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write(' />\n')
+
def exportAttributes(self, outfile, level, namespace_='', name_='docLanguageType'):
if self.langid is not None:
- outfile.write(' langid=%s' % (self.format_string(quote_attrib(self.langid).encode(ExternalEncoding), input_name='langid'), ))
+ outfile.write(' langid=%s' % (self.format_string(quote_attrib(
+ self.langid).encode(ExternalEncoding), input_name='langid'), ))
+
def exportChildren(self, outfile, level, namespace_='', name_='docLanguageType'):
for para_ in self.para:
para_.export(outfile, level, namespace_, name_='para')
+
def hasContent_(self):
if (
self.para is not None
- ):
+ ):
return True
else:
return False
+
def exportLiteral(self, outfile, level, name_='docLanguageType'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
+
def exportLiteralAttributes(self, outfile, level, name_):
if self.langid is not None:
showIndent(outfile, level)
outfile.write('langid = %s,\n' % (self.langid,))
+
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('para=[\n')
@@ -7543,18 +8606,21 @@ def exportLiteralChildren(self, outfile, level, name_):
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
+
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
+
def buildAttributes(self, attrs):
if attrs.get('langid'):
self.langid = attrs.get('langid').value
+
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'para':
+ nodeName_ == 'para':
obj_ = docParaType.factory()
obj_.build(child_)
self.para.append(obj_)
@@ -7564,12 +8630,14 @@ def buildChildren(self, child_, nodeName_):
class docParamListType(GeneratedsSuper):
subclass = None
superclass = None
+
def __init__(self, kind=None, parameteritem=None):
self.kind = kind
if parameteritem is None:
self.parameteritem = []
else:
self.parameteritem = parameteritem
+
def factory(*args_, **kwargs_):
if docParamListType.subclass:
return docParamListType.subclass(*args_, **kwargs_)
@@ -7577,15 +8645,21 @@ def factory(*args_, **kwargs_):
return docParamListType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_parameteritem(self): return self.parameteritem
- def set_parameteritem(self, parameteritem): self.parameteritem = parameteritem
+ def set_parameteritem(
+ self, parameteritem): self.parameteritem = parameteritem
+
def add_parameteritem(self, value): self.parameteritem.append(value)
- def insert_parameteritem(self, index, value): self.parameteritem[index] = value
+ def insert_parameteritem(
+ self, index, value): self.parameteritem[index] = value
+
def get_kind(self): return self.kind
def set_kind(self, kind): self.kind = kind
+
def export(self, outfile, level, namespace_='', name_='docParamListType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
- self.exportAttributes(outfile, level, namespace_, name_='docParamListType')
+ self.exportAttributes(outfile, level, namespace_,
+ name_='docParamListType')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
@@ -7593,28 +8667,35 @@ def export(self, outfile, level, namespace_='', name_='docParamListType', namesp
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write(' />\n')
+
def exportAttributes(self, outfile, level, namespace_='', name_='docParamListType'):
if self.kind is not None:
outfile.write(' kind=%s' % (quote_attrib(self.kind), ))
+
def exportChildren(self, outfile, level, namespace_='', name_='docParamListType'):
for parameteritem_ in self.parameteritem:
- parameteritem_.export(outfile, level, namespace_, name_='parameteritem')
+ parameteritem_.export(
+ outfile, level, namespace_, name_='parameteritem')
+
def hasContent_(self):
if (
self.parameteritem is not None
- ):
+ ):
return True
else:
return False
+
def exportLiteral(self, outfile, level, name_='docParamListType'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
+
def exportLiteralAttributes(self, outfile, level, name_):
if self.kind is not None:
showIndent(outfile, level)
outfile.write('kind = "%s",\n' % (self.kind,))
+
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('parameteritem=[\n')
@@ -7628,18 +8709,21 @@ def exportLiteralChildren(self, outfile, level, name_):
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
+
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
+
def buildAttributes(self, attrs):
if attrs.get('kind'):
self.kind = attrs.get('kind').value
+
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'parameteritem':
+ nodeName_ == 'parameteritem':
obj_ = docParamListItem.factory()
obj_.build(child_)
self.parameteritem.append(obj_)
@@ -7649,12 +8733,14 @@ def buildChildren(self, child_, nodeName_):
class docParamListItem(GeneratedsSuper):
subclass = None
superclass = None
+
def __init__(self, parameternamelist=None, parameterdescription=None):
if parameternamelist is None:
self.parameternamelist = []
else:
self.parameternamelist = parameternamelist
self.parameterdescription = parameterdescription
+
def factory(*args_, **kwargs_):
if docParamListItem.subclass:
return docParamListItem.subclass(*args_, **kwargs_)
@@ -7662,15 +8748,25 @@ def factory(*args_, **kwargs_):
return docParamListItem(*args_, **kwargs_)
factory = staticmethod(factory)
def get_parameternamelist(self): return self.parameternamelist
- def set_parameternamelist(self, parameternamelist): self.parameternamelist = parameternamelist
- def add_parameternamelist(self, value): self.parameternamelist.append(value)
- def insert_parameternamelist(self, index, value): self.parameternamelist[index] = value
+
+ def set_parameternamelist(
+ self, parameternamelist): self.parameternamelist = parameternamelist
+
+ def add_parameternamelist(
+ self, value): self.parameternamelist.append(value)
+ def insert_parameternamelist(
+ self, index, value): self.parameternamelist[index] = value
+
def get_parameterdescription(self): return self.parameterdescription
- def set_parameterdescription(self, parameterdescription): self.parameterdescription = parameterdescription
+
+ def set_parameterdescription(
+ self, parameterdescription): self.parameterdescription = parameterdescription
+
def export(self, outfile, level, namespace_='', name_='docParamListItem', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
- self.exportAttributes(outfile, level, namespace_, name_='docParamListItem')
+ self.exportAttributes(outfile, level, namespace_,
+ name_='docParamListItem')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
@@ -7678,28 +8774,36 @@ def export(self, outfile, level, namespace_='', name_='docParamListItem', namesp
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write(' />\n')
+
def exportAttributes(self, outfile, level, namespace_='', name_='docParamListItem'):
pass
+
def exportChildren(self, outfile, level, namespace_='', name_='docParamListItem'):
for parameternamelist_ in self.parameternamelist:
- parameternamelist_.export(outfile, level, namespace_, name_='parameternamelist')
+ parameternamelist_.export(
+ outfile, level, namespace_, name_='parameternamelist')
if self.parameterdescription:
- self.parameterdescription.export(outfile, level, namespace_, name_='parameterdescription', )
+ self.parameterdescription.export(
+ outfile, level, namespace_, name_='parameterdescription', )
+
def hasContent_(self):
if (
self.parameternamelist is not None or
self.parameterdescription is not None
- ):
+ ):
return True
else:
return False
+
def exportLiteral(self, outfile, level, name_='docParamListItem'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
+
def exportLiteralAttributes(self, outfile, level, name_):
pass
+
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('parameternamelist=[\n')
@@ -7707,7 +8811,8 @@ def exportLiteralChildren(self, outfile, level, name_):
for parameternamelist in self.parameternamelist:
showIndent(outfile, level)
outfile.write('model_.parameternamelist(\n')
- parameternamelist.exportLiteral(outfile, level, name_='parameternamelist')
+ parameternamelist.exportLiteral(
+ outfile, level, name_='parameternamelist')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
@@ -7716,25 +8821,29 @@ def exportLiteralChildren(self, outfile, level, name_):
if self.parameterdescription:
showIndent(outfile, level)
outfile.write('parameterdescription=model_.descriptionType(\n')
- self.parameterdescription.exportLiteral(outfile, level, name_='parameterdescription')
+ self.parameterdescription.exportLiteral(
+ outfile, level, name_='parameterdescription')
showIndent(outfile, level)
outfile.write('),\n')
+
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
+
def buildAttributes(self, attrs):
pass
+
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'parameternamelist':
+ nodeName_ == 'parameternamelist':
obj_ = docParamNameList.factory()
obj_.build(child_)
self.parameternamelist.append(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'parameterdescription':
+ nodeName_ == 'parameterdescription':
obj_ = descriptionType.factory()
obj_.build(child_)
self.set_parameterdescription(obj_)
@@ -7744,11 +8853,13 @@ def buildChildren(self, child_, nodeName_):
class docParamNameList(GeneratedsSuper):
subclass = None
superclass = None
+
def __init__(self, parametername=None):
if parametername is None:
self.parametername = []
else:
self.parametername = parametername
+
def factory(*args_, **kwargs_):
if docParamNameList.subclass:
return docParamNameList.subclass(*args_, **kwargs_)
@@ -7756,13 +8867,19 @@ def factory(*args_, **kwargs_):
return docParamNameList(*args_, **kwargs_)
factory = staticmethod(factory)
def get_parametername(self): return self.parametername
- def set_parametername(self, parametername): self.parametername = parametername
+ def set_parametername(
+ self, parametername): self.parametername = parametername
+
def add_parametername(self, value): self.parametername.append(value)
- def insert_parametername(self, index, value): self.parametername[index] = value
+
+ def insert_parametername(
+ self, index, value): self.parametername[index] = value
+
def export(self, outfile, level, namespace_='', name_='docParamNameList', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
- self.exportAttributes(outfile, level, namespace_, name_='docParamNameList')
+ self.exportAttributes(outfile, level, namespace_,
+ name_='docParamNameList')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
@@ -7770,25 +8887,32 @@ def export(self, outfile, level, namespace_='', name_='docParamNameList', namesp
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write(' />\n')
+
def exportAttributes(self, outfile, level, namespace_='', name_='docParamNameList'):
pass
+
def exportChildren(self, outfile, level, namespace_='', name_='docParamNameList'):
for parametername_ in self.parametername:
- parametername_.export(outfile, level, namespace_, name_='parametername')
+ parametername_.export(
+ outfile, level, namespace_, name_='parametername')
+
def hasContent_(self):
if (
self.parametername is not None
- ):
+ ):
return True
else:
return False
+
def exportLiteral(self, outfile, level, name_='docParamNameList'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
+
def exportLiteralAttributes(self, outfile, level, name_):
pass
+
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('parametername=[\n')
@@ -7802,17 +8926,20 @@ def exportLiteralChildren(self, outfile, level, name_):
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
+
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
+
def buildAttributes(self, attrs):
pass
+
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'parametername':
+ nodeName_ == 'parametername':
obj_ = docParamName.factory()
obj_.build(child_)
self.parametername.append(obj_)
@@ -7822,6 +8949,7 @@ def buildChildren(self, child_, nodeName_):
class docParamName(GeneratedsSuper):
subclass = None
superclass = None
+
def __init__(self, direction=None, ref=None, mixedclass_=None, content_=None):
self.direction = direction
if mixedclass_ is None:
@@ -7832,6 +8960,7 @@ def __init__(self, direction=None, ref=None, mixedclass_=None, content_=None):
self.content_ = []
else:
self.content_ = content_
+
def factory(*args_, **kwargs_):
if docParamName.subclass:
return docParamName.subclass(*args_, **kwargs_)
@@ -7842,6 +8971,7 @@ def get_ref(self): return self.ref
def set_ref(self, ref): self.ref = ref
def get_direction(self): return self.direction
def set_direction(self, direction): self.direction = direction
+
def export(self, outfile, level, namespace_='', name_='docParamName', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
@@ -7849,28 +8979,34 @@ def export(self, outfile, level, namespace_='', name_='docParamName', namespaced
outfile.write('>')
self.exportChildren(outfile, level + 1, namespace_, name_)
outfile.write('</%s%s>\n' % (namespace_, name_))
+
def exportAttributes(self, outfile, level, namespace_='', name_='docParamName'):
if self.direction is not None:
outfile.write(' direction=%s' % (quote_attrib(self.direction), ))
+
def exportChildren(self, outfile, level, namespace_='', name_='docParamName'):
for item_ in self.content_:
item_.export(outfile, level, item_.name, namespace_)
+
def hasContent_(self):
if (
self.ref is not None
- ):
+ ):
return True
else:
return False
+
def exportLiteral(self, outfile, level, name_='docParamName'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
+
def exportLiteralAttributes(self, outfile, level, name_):
if self.direction is not None:
showIndent(outfile, level)
outfile.write('direction = "%s",\n' % (self.direction,))
+
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('content_ = [\n')
@@ -7878,26 +9014,29 @@ def exportLiteralChildren(self, outfile, level, name_):
item_.exportLiteral(outfile, level, name_)
showIndent(outfile, level)
outfile.write('],\n')
+
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
+
def buildAttributes(self, attrs):
if attrs.get('direction'):
self.direction = attrs.get('direction').value
+
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'ref':
+ nodeName_ == 'ref':
childobj_ = docRefTextType.factory()
childobj_.build(child_)
obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
- MixedContainer.TypeNone, 'ref', childobj_)
+ MixedContainer.TypeNone, 'ref', childobj_)
self.content_.append(obj_)
elif child_.nodeType == Node.TEXT_NODE:
obj_ = self.mixedclass_(MixedContainer.CategoryText,
- MixedContainer.TypeNone, '', child_.nodeValue)
+ MixedContainer.TypeNone, '', child_.nodeValue)
self.content_.append(obj_)
# end class docParamName
@@ -7905,6 +9044,7 @@ def buildChildren(self, child_, nodeName_):
class docXRefSectType(GeneratedsSuper):
subclass = None
superclass = None
+
def __init__(self, id=None, xreftitle=None, xrefdescription=None):
self.id = id
if xreftitle is None:
@@ -7912,6 +9052,7 @@ def __init__(self, id=None, xreftitle=None, xrefdescription=None):
else:
self.xreftitle = xreftitle
self.xrefdescription = xrefdescription
+
def factory(*args_, **kwargs_):
if docXRefSectType.subclass:
return docXRefSectType.subclass(*args_, **kwargs_)
@@ -7923,13 +9064,17 @@ def set_xreftitle(self, xreftitle): self.xreftitle = xreftitle
def add_xreftitle(self, value): self.xreftitle.append(value)
def insert_xreftitle(self, index, value): self.xreftitle[index] = value
def get_xrefdescription(self): return self.xrefdescription
- def set_xrefdescription(self, xrefdescription): self.xrefdescription = xrefdescription
+ def set_xrefdescription(
+ self, xrefdescription): self.xrefdescription = xrefdescription
+
def get_id(self): return self.id
def set_id(self, id): self.id = id
+
def export(self, outfile, level, namespace_='', name_='docXRefSectType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
- self.exportAttributes(outfile, level, namespace_, name_='docXRefSectType')
+ self.exportAttributes(outfile, level, namespace_,
+ name_='docXRefSectType')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
@@ -7937,66 +9082,80 @@ def export(self, outfile, level, namespace_='', name_='docXRefSectType', namespa
            outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write(' />\n')
+
def exportAttributes(self, outfile, level, namespace_='', name_='docXRefSectType'):
if self.id is not None:
- outfile.write(' id=%s' % (self.format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), ))
+ outfile.write(' id=%s' % (self.format_string(quote_attrib(
+ self.id).encode(ExternalEncoding), input_name='id'), ))
+
def exportChildren(self, outfile, level, namespace_='', name_='docXRefSectType'):
for xreftitle_ in self.xreftitle:
showIndent(outfile, level)
-            outfile.write('<%sxreftitle>%s</%sxreftitle>\n' % (namespace_, self.format_string(quote_xml(xreftitle_).encode(ExternalEncoding), input_name='xreftitle'), namespace_))
+            outfile.write('<%sxreftitle>%s</%sxreftitle>\n' % (namespace_, self.format_string(
+                quote_xml(xreftitle_).encode(ExternalEncoding), input_name='xreftitle'), namespace_))
if self.xrefdescription:
- self.xrefdescription.export(outfile, level, namespace_, name_='xrefdescription', )
+ self.xrefdescription.export(
+ outfile, level, namespace_, name_='xrefdescription', )
+
def hasContent_(self):
if (
self.xreftitle is not None or
self.xrefdescription is not None
- ):
+ ):
return True
else:
return False
+
def exportLiteral(self, outfile, level, name_='docXRefSectType'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
+
def exportLiteralAttributes(self, outfile, level, name_):
if self.id is not None:
showIndent(outfile, level)
outfile.write('id = %s,\n' % (self.id,))
+
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('xreftitle=[\n')
level += 1
for xreftitle in self.xreftitle:
showIndent(outfile, level)
- outfile.write('%s,\n' % quote_python(xreftitle).encode(ExternalEncoding))
+ outfile.write('%s,\n' % quote_python(
+ xreftitle).encode(ExternalEncoding))
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
if self.xrefdescription:
showIndent(outfile, level)
outfile.write('xrefdescription=model_.descriptionType(\n')
- self.xrefdescription.exportLiteral(outfile, level, name_='xrefdescription')
+ self.xrefdescription.exportLiteral(
+ outfile, level, name_='xrefdescription')
showIndent(outfile, level)
outfile.write('),\n')
+
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
+
def buildAttributes(self, attrs):
if attrs.get('id'):
self.id = attrs.get('id').value
+
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'xreftitle':
+ nodeName_ == 'xreftitle':
xreftitle_ = ''
for text__content_ in child_.childNodes:
xreftitle_ += text__content_.nodeValue
self.xreftitle.append(xreftitle_)
elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'xrefdescription':
+ nodeName_ == 'xrefdescription':
obj_ = descriptionType.factory()
obj_.build(child_)
self.set_xrefdescription(obj_)
@@ -8006,6 +9165,7 @@ def buildChildren(self, child_, nodeName_):
class docCopyType(GeneratedsSuper):
subclass = None
superclass = None
+
def __init__(self, link=None, para=None, sect1=None, internal=None):
self.link = link
if para is None:
@@ -8017,6 +9177,7 @@ def __init__(self, link=None, para=None, sect1=None, internal=None):
else:
self.sect1 = sect1
self.internal = internal
+
def factory(*args_, **kwargs_):
if docCopyType.subclass:
return docCopyType.subclass(*args_, **kwargs_)
@@ -8035,6 +9196,7 @@ def get_internal(self): return self.internal
def set_internal(self, internal): self.internal = internal
def get_link(self): return self.link
def set_link(self, link): self.link = link
+
def export(self, outfile, level, namespace_='', name_='docCopyType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
@@ -8046,9 +9208,12 @@ def export(self, outfile, level, namespace_='', name_='docCopyType', namespacede
            outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write(' />\n')
+
def exportAttributes(self, outfile, level, namespace_='', name_='docCopyType'):
if self.link is not None:
- outfile.write(' link=%s' % (self.format_string(quote_attrib(self.link).encode(ExternalEncoding), input_name='link'), ))
+ outfile.write(' link=%s' % (self.format_string(quote_attrib(
+ self.link).encode(ExternalEncoding), input_name='link'), ))
+
def exportChildren(self, outfile, level, namespace_='', name_='docCopyType'):
for para_ in self.para:
para_.export(outfile, level, namespace_, name_='para')
@@ -8056,24 +9221,28 @@ def exportChildren(self, outfile, level, namespace_='', name_='docCopyType'):
sect1_.export(outfile, level, namespace_, name_='sect1')
if self.internal:
self.internal.export(outfile, level, namespace_, name_='internal')
+
def hasContent_(self):
if (
self.para is not None or
self.sect1 is not None or
self.internal is not None
- ):
+ ):
return True
else:
return False
+
def exportLiteral(self, outfile, level, name_='docCopyType'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
+
def exportLiteralAttributes(self, outfile, level, name_):
if self.link is not None:
showIndent(outfile, level)
outfile.write('link = %s,\n' % (self.link,))
+
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('para=[\n')
@@ -8105,28 +9274,31 @@ def exportLiteralChildren(self, outfile, level, name_):
self.internal.exportLiteral(outfile, level, name_='internal')
showIndent(outfile, level)
outfile.write('),\n')
+
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
+
def buildAttributes(self, attrs):
if attrs.get('link'):
self.link = attrs.get('link').value
+
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'para':
+ nodeName_ == 'para':
obj_ = docParaType.factory()
obj_.build(child_)
self.para.append(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'sect1':
+ nodeName_ == 'sect1':
obj_ = docSect1Type.factory()
obj_.build(child_)
self.sect1.append(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'internal':
+ nodeName_ == 'internal':
obj_ = docInternalType.factory()
obj_.build(child_)
self.set_internal(obj_)
@@ -8136,9 +9308,11 @@ def buildChildren(self, child_, nodeName_):
class docCharType(GeneratedsSuper):
subclass = None
superclass = None
+
def __init__(self, char=None, valueOf_=''):
self.char = char
self.valueOf_ = valueOf_
+
def factory(*args_, **kwargs_):
if docCharType.subclass:
return docCharType.subclass(*args_, **kwargs_)
@@ -8149,6 +9323,7 @@ def get_char(self): return self.char
def set_char(self, char): self.char = char
def getValueOf_(self): return self.valueOf_
def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
+
def export(self, outfile, level, namespace_='', name_='docCharType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
@@ -8160,36 +9335,43 @@ def export(self, outfile, level, namespace_='', name_='docCharType', namespacede
            outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write(' />\n')
+
def exportAttributes(self, outfile, level, namespace_='', name_='docCharType'):
if self.char is not None:
outfile.write(' char=%s' % (quote_attrib(self.char), ))
+
def exportChildren(self, outfile, level, namespace_='', name_='docCharType'):
- if self.valueOf_.find('![CDATA')>-1:
- value=quote_xml('%s' % self.valueOf_)
-            value=value.replace('![CDATA','<![CDATA')
-            value=value.replace(']]',']]>')
+ if self.valueOf_.find('![CDATA') > -1:
+ value = quote_xml('%s' % self.valueOf_)
+            value = value.replace('![CDATA', '<![CDATA')
+            value = value.replace(']]', ']]>')
outfile.write(value)
else:
outfile.write(quote_xml('%s' % self.valueOf_))
+
def hasContent_(self):
if (
self.valueOf_ is not None
- ):
+ ):
return True
else:
return False
+
def exportLiteral(self, outfile, level, name_='docCharType'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
+
def exportLiteralAttributes(self, outfile, level, name_):
if self.char is not None:
showIndent(outfile, level)
outfile.write('char = "%s",\n' % (self.char,))
+
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
+
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
@@ -8197,22 +9379,26 @@ def build(self, node_):
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
+
def buildAttributes(self, attrs):
if attrs.get('char'):
self.char = attrs.get('char').value
+
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.TEXT_NODE:
self.valueOf_ += child_.nodeValue
elif child_.nodeType == Node.CDATA_SECTION_NODE:
- self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
+ self.valueOf_ += '![CDATA[' + child_.nodeValue + ']]'
# end class docCharType
class docEmptyType(GeneratedsSuper):
subclass = None
superclass = None
+
def __init__(self, valueOf_=''):
self.valueOf_ = valueOf_
+
def factory(*args_, **kwargs_):
if docEmptyType.subclass:
return docEmptyType.subclass(*args_, **kwargs_)
@@ -8221,6 +9407,7 @@ def factory(*args_, **kwargs_):
factory = staticmethod(factory)
def getValueOf_(self): return self.valueOf_
def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
+
def export(self, outfile, level, namespace_='', name_='docEmptyType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
@@ -8232,33 +9419,40 @@ def export(self, outfile, level, namespace_='', name_='docEmptyType', namespaced
            outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write(' />\n')
+
def exportAttributes(self, outfile, level, namespace_='', name_='docEmptyType'):
pass
+
def exportChildren(self, outfile, level, namespace_='', name_='docEmptyType'):
- if self.valueOf_.find('![CDATA')>-1:
- value=quote_xml('%s' % self.valueOf_)
-            value=value.replace('![CDATA','<![CDATA')
-            value=value.replace(']]',']]>')
+ if self.valueOf_.find('![CDATA') > -1:
+ value = quote_xml('%s' % self.valueOf_)
+            value = value.replace('![CDATA', '<![CDATA')
+            value = value.replace(']]', ']]>')
outfile.write(value)
else:
outfile.write(quote_xml('%s' % self.valueOf_))
+
def hasContent_(self):
if (
self.valueOf_ is not None
- ):
+ ):
return True
else:
return False
+
def exportLiteral(self, outfile, level, name_='docEmptyType'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
+
def exportLiteralAttributes(self, outfile, level, name_):
pass
+
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
+
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
@@ -8266,13 +9460,15 @@ def build(self, node_):
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
+
def buildAttributes(self, attrs):
pass
+
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.TEXT_NODE:
self.valueOf_ += child_.nodeValue
elif child_.nodeType == Node.CDATA_SECTION_NODE:
- self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
+ self.valueOf_ += '![CDATA[' + child_.nodeValue + ']]'
# end class docEmptyType
@@ -8282,8 +9478,9 @@ def buildChildren(self, child_, nodeName_):
-s Use the SAX parser, not the minidom parser.
"""
+
def usage():
- print USAGE_TEXT
+ print(USAGE_TEXT)
sys.exit(1)
@@ -8296,7 +9493,7 @@ def parse(inFileName):
doc = None
sys.stdout.write('\n')
rootObj.export(sys.stdout, 0, name_="doxygen",
- namespacedef_='')
+ namespacedef_='')
return rootObj
@@ -8309,7 +9506,7 @@ def parseString(inString):
doc = None
sys.stdout.write('\n')
rootObj.export(sys.stdout, 0, name_="doxygen",
- namespacedef_='')
+ namespacedef_='')
return rootObj
@@ -8338,5 +9535,4 @@ def main():
if __name__ == '__main__':
main()
#import pdb
- #pdb.run('main()')
-
+ # pdb.run('main()')
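
Note: every generated doc*Type class above follows the same generateDS.py export pattern: write the opening tag, delegate to exportAttributes(), recurse through exportChildren(), then write the matching closing tag. A minimal, self-contained sketch of that pattern (SimpleNode and this quote_attrib helper are illustrative stand-ins, not part of the generated module):

    import sys

    def quote_attrib(value):
        # escape what XML cannot carry verbatim inside an attribute value
        value = str(value).replace('&', '&amp;').replace('<', '&lt;').replace('"', '&quot;')
        return '"%s"' % value

    class SimpleNode(object):
        def __init__(self, name, attrs=None, text=''):
            self.name = name
            self.attrs = attrs or {}
            self.text = text

        def export(self, outfile, level):
            outfile.write('  ' * level + '<%s' % self.name)        # opening tag
            for key, val in self.attrs.items():                    # exportAttributes() step
                outfile.write(' %s=%s' % (key, quote_attrib(val)))
            outfile.write('>%s</%s>\n' % (self.text, self.name))   # children, then closing tag

    SimpleNode('parametername', {'direction': 'in'}, 'channel_select').export(sys.stdout, 0)
    # prints: <parametername direction="in">channel_select</parametername>

The real export() methods implement exactly this open/attributes/children/close sequence for each Doxygen XML element.
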
diff --git a/gr-aistx/docs/doxygen/doxyxml/generated/index.py b/gr-aistx/docs/doxygen/doxyxml/generated/index.py
index 7a70e14..7ffbdf1 100644
--- a/gr-aistx/docs/doxygen/doxyxml/generated/index.py
+++ b/gr-aistx/docs/doxygen/doxyxml/generated/index.py
@@ -8,9 +8,10 @@
import os
import sys
-import compound
+from . import compound
+
+from . import indexsuper as supermod
-import indexsuper as supermod
class DoxygenTypeSub(supermod.DoxygenType):
def __init__(self, version=None, compound=None):
@@ -32,6 +33,7 @@ def find_compounds_and_members(self, details):
return results
+
supermod.DoxygenType.subclass = DoxygenTypeSub
# end class DoxygenTypeSub
@@ -53,6 +55,7 @@ def find_members(self, details):
return results
+
supermod.CompoundType.subclass = CompoundTypeSub
# end class CompoundTypeSub
@@ -62,6 +65,7 @@ class MemberTypeSub(supermod.MemberType):
def __init__(self, kind=None, refid=None, name=''):
supermod.MemberType.__init__(self, kind, refid, name)
+
supermod.MemberType.subclass = MemberTypeSub
# end class MemberTypeSub
@@ -74,4 +78,3 @@ def parse(inFilename):
rootObj.build(rootNode)
return rootObj
-
diff --git a/gr-aistx/docs/doxygen/doxyxml/generated/indexsuper.py b/gr-aistx/docs/doxygen/doxyxml/generated/indexsuper.py
index a991530..b30e062 100644
--- a/gr-aistx/docs/doxygen/doxyxml/generated/indexsuper.py
+++ b/gr-aistx/docs/doxygen/doxyxml/generated/indexsuper.py
@@ -4,9 +4,9 @@
# Generated Thu Jun 11 18:43:54 2009 by generateDS.py.
#
+
import sys
-import getopt
-from string import lower as str_lower
+
from xml.dom import minidom
from xml.dom import Node
@@ -19,17 +19,21 @@
try:
from generatedssuper import GeneratedsSuper
-except ImportError, exp:
+except ImportError as exp:
- class GeneratedsSuper:
+ class GeneratedsSuper(object):
def format_string(self, input_data, input_name=''):
return input_data
+
def format_integer(self, input_data, input_name=''):
return '%d' % input_data
+
def format_float(self, input_data, input_name=''):
return '%f' % input_data
+
def format_double(self, input_data, input_name=''):
return '%e' % input_data
+
def format_boolean(self, input_data, input_name=''):
return '%s' % input_data
@@ -41,9 +45,9 @@ def format_boolean(self, input_data, input_name=''):
## from IPython.Shell import IPShellEmbed
## args = ''
-## ipshell = IPShellEmbed(args,
+# ipshell = IPShellEmbed(args,
## banner = 'Dropping into IPython',
-## exit_msg = 'Leaving Interpreter, back to program.')
+# exit_msg = 'Leaving Interpreter, back to program.')
# Then use the following line where and when you want to drop into the
# IPython shell:
@@ -59,20 +63,23 @@ def format_boolean(self, input_data, input_name=''):
# Support/utility functions.
#
+
def showIndent(outfile, level):
for idx in range(level):
outfile.write(' ')
+
def quote_xml(inStr):
- s1 = (isinstance(inStr, basestring) and inStr or
+ s1 = (isinstance(inStr, str) and inStr or
'%s' % inStr)
    s1 = s1.replace('&', '&amp;')
    s1 = s1.replace('<', '&lt;')
    s1 = s1.replace('>', '&gt;')
return s1
+
def quote_attrib(inStr):
- s1 = (isinstance(inStr, basestring) and inStr or
+ s1 = (isinstance(inStr, str) and inStr or
'%s' % inStr)
    s1 = s1.replace('&', '&amp;')
    s1 = s1.replace('<', '&lt;')
@@ -86,6 +93,7 @@ def quote_attrib(inStr):
s1 = '"%s"' % s1
return s1
+
def quote_python(inStr):
s1 = inStr
if s1.find("'") == -1:
@@ -102,7 +110,7 @@ def quote_python(inStr):
return '"""%s"""' % s1
-class MixedContainer:
+class MixedContainer(object):
# Constants for category:
CategoryNone = 0
CategoryText = 1
@@ -117,26 +125,33 @@ class MixedContainer:
TypeDecimal = 5
TypeDouble = 6
TypeBoolean = 7
+
def __init__(self, category, content_type, name, value):
self.category = category
self.content_type = content_type
self.name = name
self.value = value
+
def getCategory(self):
return self.category
+
def getContenttype(self, content_type):
return self.content_type
+
def getValue(self):
return self.value
+
def getName(self):
return self.name
+
def export(self, outfile, level, name, namespace):
if self.category == MixedContainer.CategoryText:
outfile.write(self.value)
elif self.category == MixedContainer.CategorySimple:
self.exportSimple(outfile, level, name)
else: # category == MixedContainer.CategoryComplex
- self.value.export(outfile, level, namespace,name)
+ self.value.export(outfile, level, namespace, name)
+
def exportSimple(self, outfile, level, name):
if self.content_type == MixedContainer.TypeString:
            outfile.write('<%s>%s</%s>' % (self.name, self.value, self.name))
@@ -148,19 +163,20 @@ def exportSimple(self, outfile, level, name):
            outfile.write('<%s>%f</%s>' % (self.name, self.value, self.name))
elif self.content_type == MixedContainer.TypeDouble:
            outfile.write('<%s>%g</%s>' % (self.name, self.value, self.name))
+
def exportLiteral(self, outfile, level, name):
if self.category == MixedContainer.CategoryText:
showIndent(outfile, level)
- outfile.write('MixedContainer(%d, %d, "%s", "%s"),\n' % \
- (self.category, self.content_type, self.name, self.value))
+ outfile.write('MixedContainer(%d, %d, "%s", "%s"),\n' %
+ (self.category, self.content_type, self.name, self.value))
elif self.category == MixedContainer.CategorySimple:
showIndent(outfile, level)
- outfile.write('MixedContainer(%d, %d, "%s", "%s"),\n' % \
- (self.category, self.content_type, self.name, self.value))
+ outfile.write('MixedContainer(%d, %d, "%s", "%s"),\n' %
+ (self.category, self.content_type, self.name, self.value))
else: # category == MixedContainer.CategoryComplex
showIndent(outfile, level)
- outfile.write('MixedContainer(%d, %d, "%s",\n' % \
- (self.category, self.content_type, self.name,))
+ outfile.write('MixedContainer(%d, %d, "%s",\n' %
+ (self.category, self.content_type, self.name,))
self.value.exportLiteral(outfile, level + 1)
showIndent(outfile, level)
outfile.write(')\n')
@@ -171,6 +187,7 @@ def __init__(self, name='', data_type='', container=0):
self.name = name
self.data_type = data_type
self.container = container
+
def set_name(self, name): self.name = name
def get_name(self): return self.name
def set_data_type(self, data_type): self.data_type = data_type
@@ -186,12 +203,14 @@ def get_container(self): return self.container
class DoxygenType(GeneratedsSuper):
subclass = None
superclass = None
+
def __init__(self, version=None, compound=None):
self.version = version
if compound is None:
self.compound = []
else:
self.compound = compound
+
def factory(*args_, **kwargs_):
if DoxygenType.subclass:
return DoxygenType.subclass(*args_, **kwargs_)
@@ -204,6 +223,7 @@ def add_compound(self, value): self.compound.append(value)
def insert_compound(self, index, value): self.compound[index] = value
def get_version(self): return self.version
def set_version(self, version): self.version = version
+
def export(self, outfile, level, namespace_='', name_='DoxygenType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
@@ -215,27 +235,34 @@ def export(self, outfile, level, namespace_='', name_='DoxygenType', namespacede
            outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write(' />\n')
+
def exportAttributes(self, outfile, level, namespace_='', name_='DoxygenType'):
- outfile.write(' version=%s' % (self.format_string(quote_attrib(self.version).encode(ExternalEncoding), input_name='version'), ))
+ outfile.write(' version=%s' % (self.format_string(quote_attrib(
+ self.version).encode(ExternalEncoding), input_name='version'), ))
+
def exportChildren(self, outfile, level, namespace_='', name_='DoxygenType'):
for compound_ in self.compound:
compound_.export(outfile, level, namespace_, name_='compound')
+
def hasContent_(self):
if (
self.compound is not None
- ):
+ ):
return True
else:
return False
+
def exportLiteral(self, outfile, level, name_='DoxygenType'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
+
def exportLiteralAttributes(self, outfile, level, name_):
if self.version is not None:
showIndent(outfile, level)
outfile.write('version = %s,\n' % (self.version,))
+
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('compound=[\n')
@@ -249,18 +276,21 @@ def exportLiteralChildren(self, outfile, level, name_):
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
+
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
+
def buildAttributes(self, attrs):
if attrs.get('version'):
self.version = attrs.get('version').value
+
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'compound':
+ nodeName_ == 'compound':
obj_ = CompoundType.factory()
obj_.build(child_)
self.compound.append(obj_)
@@ -270,6 +300,7 @@ def buildChildren(self, child_, nodeName_):
class CompoundType(GeneratedsSuper):
subclass = None
superclass = None
+
def __init__(self, kind=None, refid=None, name=None, member=None):
self.kind = kind
self.refid = refid
@@ -278,6 +309,7 @@ def __init__(self, kind=None, refid=None, name=None, member=None):
self.member = []
else:
self.member = member
+
def factory(*args_, **kwargs_):
if CompoundType.subclass:
return CompoundType.subclass(*args_, **kwargs_)
@@ -294,6 +326,7 @@ def get_kind(self): return self.kind
def set_kind(self, kind): self.kind = kind
def get_refid(self): return self.refid
def set_refid(self, refid): self.refid = refid
+
def export(self, outfile, level, namespace_='', name_='CompoundType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
@@ -305,28 +338,35 @@ def export(self, outfile, level, namespace_='', name_='CompoundType', namespaced
            outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write(' />\n')
+
def exportAttributes(self, outfile, level, namespace_='', name_='CompoundType'):
outfile.write(' kind=%s' % (quote_attrib(self.kind), ))
- outfile.write(' refid=%s' % (self.format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), ))
+ outfile.write(' refid=%s' % (self.format_string(quote_attrib(
+ self.refid).encode(ExternalEncoding), input_name='refid'), ))
+
def exportChildren(self, outfile, level, namespace_='', name_='CompoundType'):
if self.name is not None:
showIndent(outfile, level)
-            outfile.write('<%sname>%s</%sname>\n' % (namespace_, self.format_string(quote_xml(self.name).encode(ExternalEncoding), input_name='name'), namespace_))
+            outfile.write('<%sname>%s</%sname>\n' % (namespace_, self.format_string(
+                quote_xml(self.name).encode(ExternalEncoding), input_name='name'), namespace_))
for member_ in self.member:
member_.export(outfile, level, namespace_, name_='member')
+
def hasContent_(self):
if (
self.name is not None or
self.member is not None
- ):
+ ):
return True
else:
return False
+
def exportLiteral(self, outfile, level, name_='CompoundType'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
+
def exportLiteralAttributes(self, outfile, level, name_):
if self.kind is not None:
showIndent(outfile, level)
@@ -334,9 +374,11 @@ def exportLiteralAttributes(self, outfile, level, name_):
if self.refid is not None:
showIndent(outfile, level)
outfile.write('refid = %s,\n' % (self.refid,))
+
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
- outfile.write('name=%s,\n' % quote_python(self.name).encode(ExternalEncoding))
+ outfile.write('name=%s,\n' % quote_python(
+ self.name).encode(ExternalEncoding))
showIndent(outfile, level)
outfile.write('member=[\n')
level += 1
@@ -349,26 +391,29 @@ def exportLiteralChildren(self, outfile, level, name_):
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
+
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
+
def buildAttributes(self, attrs):
if attrs.get('kind'):
self.kind = attrs.get('kind').value
if attrs.get('refid'):
self.refid = attrs.get('refid').value
+
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'name':
+ nodeName_ == 'name':
name_ = ''
for text__content_ in child_.childNodes:
name_ += text__content_.nodeValue
self.name = name_
elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'member':
+ nodeName_ == 'member':
obj_ = MemberType.factory()
obj_.build(child_)
self.member.append(obj_)
@@ -378,10 +423,12 @@ def buildChildren(self, child_, nodeName_):
class MemberType(GeneratedsSuper):
subclass = None
superclass = None
+
def __init__(self, kind=None, refid=None, name=None):
self.kind = kind
self.refid = refid
self.name = name
+
def factory(*args_, **kwargs_):
if MemberType.subclass:
return MemberType.subclass(*args_, **kwargs_)
@@ -394,6 +441,7 @@ def get_kind(self): return self.kind
def set_kind(self, kind): self.kind = kind
def get_refid(self): return self.refid
def set_refid(self, refid): self.refid = refid
+
def export(self, outfile, level, namespace_='', name_='MemberType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
@@ -405,25 +453,32 @@ def export(self, outfile, level, namespace_='', name_='MemberType', namespacedef
            outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write(' />\n')
+
def exportAttributes(self, outfile, level, namespace_='', name_='MemberType'):
outfile.write(' kind=%s' % (quote_attrib(self.kind), ))
- outfile.write(' refid=%s' % (self.format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), ))
+ outfile.write(' refid=%s' % (self.format_string(quote_attrib(
+ self.refid).encode(ExternalEncoding), input_name='refid'), ))
+
def exportChildren(self, outfile, level, namespace_='', name_='MemberType'):
if self.name is not None:
showIndent(outfile, level)
-            outfile.write('<%sname>%s</%sname>\n' % (namespace_, self.format_string(quote_xml(self.name).encode(ExternalEncoding), input_name='name'), namespace_))
+            outfile.write('<%sname>%s</%sname>\n' % (namespace_, self.format_string(
+                quote_xml(self.name).encode(ExternalEncoding), input_name='name'), namespace_))
+
def hasContent_(self):
if (
self.name is not None
- ):
+ ):
return True
else:
return False
+
def exportLiteral(self, outfile, level, name_='MemberType'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
+
def exportLiteralAttributes(self, outfile, level, name_):
if self.kind is not None:
showIndent(outfile, level)
@@ -431,23 +486,28 @@ def exportLiteralAttributes(self, outfile, level, name_):
if self.refid is not None:
showIndent(outfile, level)
outfile.write('refid = %s,\n' % (self.refid,))
+
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
- outfile.write('name=%s,\n' % quote_python(self.name).encode(ExternalEncoding))
+ outfile.write('name=%s,\n' % quote_python(
+ self.name).encode(ExternalEncoding))
+
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
+
def buildAttributes(self, attrs):
if attrs.get('kind'):
self.kind = attrs.get('kind').value
if attrs.get('refid'):
self.refid = attrs.get('refid').value
+
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'name':
+ nodeName_ == 'name':
name_ = ''
for text__content_ in child_.childNodes:
name_ += text__content_.nodeValue
@@ -461,8 +521,9 @@ def buildChildren(self, child_, nodeName_):
-s Use the SAX parser, not the minidom parser.
"""
+
def usage():
- print USAGE_TEXT
+ print(USAGE_TEXT)
sys.exit(1)
@@ -475,7 +536,7 @@ def parse(inFileName):
doc = None
sys.stdout.write('\n')
rootObj.export(sys.stdout, 0, name_="doxygenindex",
- namespacedef_='')
+ namespacedef_='')
return rootObj
@@ -488,7 +549,7 @@ def parseString(inString):
doc = None
sys.stdout.write('\n')
rootObj.export(sys.stdout, 0, name_="doxygenindex",
- namespacedef_='')
+ namespacedef_='')
return rootObj
@@ -514,10 +575,7 @@ def main():
usage()
-
-
if __name__ == '__main__':
main()
#import pdb
- #pdb.run('main()')
-
+ # pdb.run('main()')
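
Note on the quote_xml()/quote_attrib() helpers in indexsuper.py above: they only need to translate the three characters that are significant in XML character data, and '&' has to be escaped first so the entities produced for '<' and '>' are not escaped a second time. A standalone sketch of the expected behaviour (illustrative only, not the module's code):

    def quote_xml(text):
        # '&' first, otherwise the '&lt;'/'&gt;' written below would be double-escaped
        text = str(text)
        text = text.replace('&', '&amp;')
        text = text.replace('<', '&lt;')
        text = text.replace('>', '&gt;')
        return text

    assert quote_xml('a < b & c > d') == 'a &lt; b &amp; c &gt; d'
    print(quote_xml('Multiply & Add'))   # -> Multiply &amp; Add
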
diff --git a/gr-aistx/docs/doxygen/doxyxml/text.py b/gr-aistx/docs/doxygen/doxyxml/text.py
index 629edd1..5291c1f 100644
--- a/gr-aistx/docs/doxygen/doxyxml/text.py
+++ b/gr-aistx/docs/doxygen/doxyxml/text.py
@@ -1,42 +1,34 @@
#
# Copyright 2010 Free Software Foundation, Inc.
#
-# This file is part of GNU Radio
+# This file was generated by gr_modtool, a tool from the GNU Radio framework
+# This file is a part of gr-aistx
#
-# GNU Radio is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 3, or (at your option)
-# any later version.
+# SPDX-License-Identifier: GPL-3.0-or-later
#
-# GNU Radio is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with GNU Radio; see the file COPYING. If not, write to
-# the Free Software Foundation, Inc., 51 Franklin Street,
-# Boston, MA 02110-1301, USA.
#
"""
Utilities for extracting text from generated classes.
"""
+
def is_string(txt):
if isinstance(txt, str):
return True
try:
- if isinstance(txt, unicode):
+ if isinstance(txt, str):
return True
except NameError:
pass
return False
+
def description(obj):
if obj is None:
return None
return description_bit(obj).strip()
+
def description_bit(obj):
if hasattr(obj, 'content'):
contents = [description_bit(item) for item in obj.content]
@@ -49,7 +41,8 @@ def description_bit(obj):
elif is_string(obj):
return obj
else:
- raise StandardError('Expecting a string or something with content, content_ or value attribute')
+ raise Exception(
+ 'Expecting a string or something with content, content_ or value attribute')
# If this bit is a paragraph then add one some line breaks.
if hasattr(obj, 'name') and obj.name == 'para':
result += "\n\n"
diff --git a/gr-aistx/docs/doxygen/other/doxypy.py b/gr-aistx/docs/doxygen/other/doxypy.py
new file mode 100644
index 0000000..28b1664
--- /dev/null
+++ b/gr-aistx/docs/doxygen/other/doxypy.py
@@ -0,0 +1,446 @@
+#!/usr/bin/env python
+
+
+__applicationName__ = "doxypy"
+__blurb__ = """
+doxypy is an input filter for Doxygen. It preprocesses python
+files so that docstrings of classes and functions are reformatted
+into Doxygen-conform documentation blocks.
+"""
+
+__doc__ = __blurb__ + \
+ """
+In order to make Doxygen preprocess files through doxypy, simply
+add the following lines to your Doxyfile:
+ FILTER_SOURCE_FILES = YES
+ INPUT_FILTER = "python /path/to/doxypy.py"
+"""
+
+__version__ = "0.4.2"
+__date__ = "5th December 2008"
+__website__ = "http://code.foosel.org/doxypy"
+
+__author__ = (
+ "Philippe 'demod' Neumann (doxypy at demod dot org)",
+ "Gina 'foosel' Haeussge (gina at foosel dot net)"
+)
+
+__licenseName__ = "GPL v2"
+__license__ = """This program is free software: you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation, either version 2 of the License, or
+(at your option) any later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program. If not, see <http://www.gnu.org/licenses/>.
+"""
+
+import sys
+import re
+
+from argparse import ArgumentParser
+
+
+class FSM(object):
+ """Implements a finite state machine.
+
+ Transitions are given as 4-tuples, consisting of an origin state, a target
+ state, a condition for the transition (given as a reference to a function
+ which gets called with a given piece of input) and a pointer to a function
+ to be called upon the execution of the given transition.
+ """
+
+ """
+ @var transitions holds the transitions
+ @var current_state holds the current state
+ @var current_input holds the current input
+ @var current_transition hold the currently active transition
+ """
+
+ def __init__(self, start_state=None, transitions=[]):
+ self.transitions = transitions
+ self.current_state = start_state
+ self.current_input = None
+ self.current_transition = None
+
+ def setStartState(self, state):
+ self.current_state = state
+
+ def addTransition(self, from_state, to_state, condition, callback):
+ self.transitions.append([from_state, to_state, condition, callback])
+
+ def makeTransition(self, input):
+ """ Makes a transition based on the given input.
+
+ @param input input to parse by the FSM
+ """
+ for transition in self.transitions:
+ [from_state, to_state, condition, callback] = transition
+ if from_state == self.current_state:
+ match = condition(input)
+ if match:
+ self.current_state = to_state
+ self.current_input = input
+ self.current_transition = transition
+ if args.debug:
+ print("# FSM: executing (%s -> %s) for line '%s'" %
+ (from_state, to_state, input), file=sys.stderr)
+ callback(match)
+ return
+
+
+class Doxypy(object):
+ def __init__(self):
+ string_prefixes = "[uU]?[rR]?"
+
+ self.start_single_comment_re = re.compile(
+ r"^\s*%s(''')" % string_prefixes)
+ self.end_single_comment_re = re.compile(r"(''')\s*$")
+
+ self.start_double_comment_re = re.compile(
+ r'^\s*%s(""")' % string_prefixes)
+ self.end_double_comment_re = re.compile(r'(""")\s*$')
+
+ self.single_comment_re = re.compile(
+ r"^\s*%s(''').*(''')\s*$" % string_prefixes)
+ self.double_comment_re = re.compile(
+ r'^\s*%s(""").*(""")\s*$' % string_prefixes)
+
+ self.defclass_re = re.compile(r"^(\s*)(def .+:|class .+:)")
+ self.empty_re = re.compile(r"^\s*$")
+ self.hashline_re = re.compile(r"^\s*#.*$")
+ self.importline_re = re.compile(r"^\s*(import |from .+ import)")
+
+ self.multiline_defclass_start_re = re.compile(
+ r"^(\s*)(def|class)(\s.*)?$")
+ self.multiline_defclass_end_re = re.compile(r":\s*$")
+
+ # Transition list format
+ # ["FROM", "TO", condition, action]
+ transitions = [
+ # FILEHEAD
+
+ # single line comments
+ ["FILEHEAD", "FILEHEAD", self.single_comment_re.search,
+ self.appendCommentLine],
+ ["FILEHEAD", "FILEHEAD", self.double_comment_re.search,
+ self.appendCommentLine],
+
+ # multiline comments
+ ["FILEHEAD", "FILEHEAD_COMMENT_SINGLE",
+ self.start_single_comment_re.search, self.appendCommentLine],
+ ["FILEHEAD_COMMENT_SINGLE", "FILEHEAD",
+ self.end_single_comment_re.search, self.appendCommentLine],
+ ["FILEHEAD_COMMENT_SINGLE", "FILEHEAD_COMMENT_SINGLE",
+ self.catchall, self.appendCommentLine],
+ ["FILEHEAD", "FILEHEAD_COMMENT_DOUBLE",
+ self.start_double_comment_re.search, self.appendCommentLine],
+ ["FILEHEAD_COMMENT_DOUBLE", "FILEHEAD",
+ self.end_double_comment_re.search, self.appendCommentLine],
+ ["FILEHEAD_COMMENT_DOUBLE", "FILEHEAD_COMMENT_DOUBLE",
+ self.catchall, self.appendCommentLine],
+
+ # other lines
+ ["FILEHEAD", "FILEHEAD", self.empty_re.search, self.appendFileheadLine],
+ ["FILEHEAD", "FILEHEAD", self.hashline_re.search, self.appendFileheadLine],
+ ["FILEHEAD", "FILEHEAD", self.importline_re.search,
+ self.appendFileheadLine],
+ ["FILEHEAD", "DEFCLASS", self.defclass_re.search, self.resetCommentSearch],
+ ["FILEHEAD", "DEFCLASS_MULTI",
+ self.multiline_defclass_start_re.search, self.resetCommentSearch],
+ ["FILEHEAD", "DEFCLASS_BODY", self.catchall, self.appendFileheadLine],
+
+ # DEFCLASS
+
+ # single line comments
+ ["DEFCLASS", "DEFCLASS_BODY",
+ self.single_comment_re.search, self.appendCommentLine],
+ ["DEFCLASS", "DEFCLASS_BODY",
+ self.double_comment_re.search, self.appendCommentLine],
+
+ # multiline comments
+ ["DEFCLASS", "COMMENT_SINGLE",
+ self.start_single_comment_re.search, self.appendCommentLine],
+ ["COMMENT_SINGLE", "DEFCLASS_BODY",
+ self.end_single_comment_re.search, self.appendCommentLine],
+ ["COMMENT_SINGLE", "COMMENT_SINGLE",
+ self.catchall, self.appendCommentLine],
+ ["DEFCLASS", "COMMENT_DOUBLE",
+ self.start_double_comment_re.search, self.appendCommentLine],
+ ["COMMENT_DOUBLE", "DEFCLASS_BODY",
+ self.end_double_comment_re.search, self.appendCommentLine],
+ ["COMMENT_DOUBLE", "COMMENT_DOUBLE",
+ self.catchall, self.appendCommentLine],
+
+ # other lines
+ ["DEFCLASS", "DEFCLASS", self.empty_re.search, self.appendDefclassLine],
+ ["DEFCLASS", "DEFCLASS", self.defclass_re.search, self.resetCommentSearch],
+ ["DEFCLASS", "DEFCLASS_MULTI",
+ self.multiline_defclass_start_re.search, self.resetCommentSearch],
+ ["DEFCLASS", "DEFCLASS_BODY", self.catchall, self.stopCommentSearch],
+
+ # DEFCLASS_BODY
+
+ ["DEFCLASS_BODY", "DEFCLASS",
+ self.defclass_re.search, self.startCommentSearch],
+ ["DEFCLASS_BODY", "DEFCLASS_MULTI",
+ self.multiline_defclass_start_re.search, self.startCommentSearch],
+ ["DEFCLASS_BODY", "DEFCLASS_BODY", self.catchall, self.appendNormalLine],
+
+ # DEFCLASS_MULTI
+ ["DEFCLASS_MULTI", "DEFCLASS",
+ self.multiline_defclass_end_re.search, self.appendDefclassLine],
+ ["DEFCLASS_MULTI", "DEFCLASS_MULTI",
+ self.catchall, self.appendDefclassLine],
+ ]
+
+ self.fsm = FSM("FILEHEAD", transitions)
+ self.outstream = sys.stdout
+
+ self.output = []
+ self.comment = []
+ self.filehead = []
+ self.defclass = []
+ self.indent = ""
+
+ def __closeComment(self):
+ """Appends any open comment block and triggering block to the output."""
+
+ if args.autobrief:
+ if len(self.comment) == 1 \
+ or (len(self.comment) > 2 and self.comment[1].strip() == ''):
+ self.comment[0] = self.__docstringSummaryToBrief(
+ self.comment[0])
+
+ if self.comment:
+ block = self.makeCommentBlock()
+ self.output.extend(block)
+
+ if self.defclass:
+ self.output.extend(self.defclass)
+
+ def __docstringSummaryToBrief(self, line):
+ """Adds \\brief to the docstrings summary line.
+
+ A \\brief is prepended, provided no other doxygen command is at the
+ start of the line.
+ """
+ stripped = line.strip()
+ if stripped and not stripped[0] in ('@', '\\'):
+ return "\\brief " + line
+ else:
+ return line
+
+ def __flushBuffer(self):
+ """Flushes the current outputbuffer to the outstream."""
+ if self.output:
+ try:
+ if args.debug:
+ print("# OUTPUT: ", self.output, file=sys.stderr)
+ print("\n".join(self.output), file=self.outstream)
+ self.outstream.flush()
+ except IOError:
+ # Fix for FS#33. Catches "broken pipe" when doxygen closes
+ # stdout prematurely upon usage of INPUT_FILTER, INLINE_SOURCES
+ # and FILTER_SOURCE_FILES.
+ pass
+ self.output = []
+
+ def catchall(self, input):
+ """The catchall-condition, always returns true."""
+ return True
+
+ def resetCommentSearch(self, match):
+ """Restarts a new comment search for a different triggering line.
+
+ Closes the current commentblock and starts a new comment search.
+ """
+ if args.debug:
+ print("# CALLBACK: resetCommentSearch", file=sys.stderr)
+ self.__closeComment()
+ self.startCommentSearch(match)
+
+ def startCommentSearch(self, match):
+ """Starts a new comment search.
+
+ Saves the triggering line, resets the current comment and saves
+ the current indentation.
+ """
+ if args.debug:
+ print("# CALLBACK: startCommentSearch", file=sys.stderr)
+ self.defclass = [self.fsm.current_input]
+ self.comment = []
+ self.indent = match.group(1)
+
+ def stopCommentSearch(self, match):
+ """Stops a comment search.
+
+ Closes the current commentblock, resets the triggering line and
+ appends the current line to the output.
+ """
+ if args.debug:
+ print("# CALLBACK: stopCommentSearch", file=sys.stderr)
+ self.__closeComment()
+
+ self.defclass = []
+ self.output.append(self.fsm.current_input)
+
+ def appendFileheadLine(self, match):
+ """Appends a line in the FILEHEAD state.
+
+ Closes the open comment block, resets it and appends the current line.
+ """
+ if args.debug:
+ print("# CALLBACK: appendFileheadLine", file=sys.stderr)
+ self.__closeComment()
+ self.comment = []
+ self.output.append(self.fsm.current_input)
+
+ def appendCommentLine(self, match):
+ """Appends a comment line.
+
+ The comment delimiter is removed from multiline start and ends as
+ well as singleline comments.
+ """
+ if args.debug:
+ print("# CALLBACK: appendCommentLine", file=sys.stderr)
+ (from_state, to_state, condition, callback) = self.fsm.current_transition
+
+ # single line comment
+ if (from_state == "DEFCLASS" and to_state == "DEFCLASS_BODY") \
+ or (from_state == "FILEHEAD" and to_state == "FILEHEAD"):
+ # remove comment delimiter from begin and end of the line
+ activeCommentDelim = match.group(1)
+ line = self.fsm.current_input
+ self.comment.append(line[line.find(
+ activeCommentDelim) + len(activeCommentDelim):line.rfind(activeCommentDelim)])
+
+ if (to_state == "DEFCLASS_BODY"):
+ self.__closeComment()
+ self.defclass = []
+ # multiline start
+ elif from_state == "DEFCLASS" or from_state == "FILEHEAD":
+ # remove comment delimiter from begin of the line
+ activeCommentDelim = match.group(1)
+ line = self.fsm.current_input
+ self.comment.append(
+ line[line.find(activeCommentDelim) + len(activeCommentDelim):])
+ # multiline end
+ elif to_state == "DEFCLASS_BODY" or to_state == "FILEHEAD":
+ # remove comment delimiter from end of the line
+ activeCommentDelim = match.group(1)
+ line = self.fsm.current_input
+ self.comment.append(line[0:line.rfind(activeCommentDelim)])
+ if (to_state == "DEFCLASS_BODY"):
+ self.__closeComment()
+ self.defclass = []
+ # in multiline comment
+ else:
+ # just append the comment line
+ self.comment.append(self.fsm.current_input)
+
+ def appendNormalLine(self, match):
+ """Appends a line to the output."""
+ if args.debug:
+ print("# CALLBACK: appendNormalLine", file=sys.stderr)
+ self.output.append(self.fsm.current_input)
+
+ def appendDefclassLine(self, match):
+ """Appends a line to the triggering block."""
+ if args.debug:
+ print("# CALLBACK: appendDefclassLine", file=sys.stderr)
+ self.defclass.append(self.fsm.current_input)
+
+ def makeCommentBlock(self):
+ """Indents the current comment block with respect to the current
+ indentation level.
+
+ @returns a list of indented comment lines
+ """
+ doxyStart = "##"
+ commentLines = self.comment
+
+ commentLines = ["%s# %s" % (self.indent, x) for x in commentLines]
+ l = [self.indent + doxyStart]
+ l.extend(commentLines)
+
+ return l
+
+ def parse(self, input):
+ """Parses a python file given as input string and returns the doxygen-
+ compatible representation.
+
+ @param input the python code to parse
+ @returns the modified python code
+ """
+ lines = input.split("\n")
+
+ for line in lines:
+ self.fsm.makeTransition(line)
+
+ if self.fsm.current_state == "DEFCLASS":
+ self.__closeComment()
+
+ return "\n".join(self.output)
+
+ def parseFile(self, filename):
+ """Parses a python file given as input string and returns the doxygen-
+ compatible representation.
+
+ @param input the python code to parse
+ @returns the modified python code
+ """
+ f = open(filename, 'r')
+
+ for line in f:
+ self.parseLine(line.rstrip('\r\n'))
+ if self.fsm.current_state == "DEFCLASS":
+ self.__closeComment()
+ self.__flushBuffer()
+ f.close()
+
+ def parseLine(self, line):
+ """Parse one line of python and flush the resulting output to the
+ outstream.
+
+ @param line the python code line to parse
+ """
+ self.fsm.makeTransition(line)
+ self.__flushBuffer()
+
+
+def argParse():
+ """Parses commandline args."""
+ parser = ArgumentParser(prog=__applicationName__)
+
+ parser.add_argument("--version", action="version",
+ version="%(prog)s " + __version__
+ )
+ parser.add_argument("--autobrief", action="store_true",
+ help="use the docstring summary line as \\brief description"
+ )
+ parser.add_argument("--debug", action="store_true",
+ help="enable debug output on stderr"
+ )
+ parser.add_argument("filename", metavar="FILENAME")
+
+ return parser.parse_args()
+
+
+def main():
+ """Starts the parser on the file given by the filename as the first
+ argument on the commandline.
+ """
+ global args
+ args = argParse()
+ fsm = Doxypy()
+ fsm.parseFile(args.filename)
+
+
+if __name__ == "__main__":
+ main()
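
Note on the FSM class added above: doxypy drives the whole filter through a table of 4-tuples (origin state, target state, condition callable, callback callable); makeTransition() fires the first entry whose origin matches the current state and whose condition matches the input line. A stripped-down sketch of that mechanism with toy states and regexes (this is not doxypy's real transition table):

    import re

    class TinyFSM(object):
        def __init__(self, start_state, transitions):
            self.state = start_state
            self.transitions = transitions            # list of 4-tuples

        def feed(self, line):
            for origin, target, condition, callback in self.transitions:
                if origin == self.state and condition(line):
                    self.state = target
                    callback(line)
                    return

    def on_def(line):
        print('saw a definition: ' + line.strip())

    fsm = TinyFSM('BODY', [
        ('BODY', 'DEF', re.compile(r'^\s*(def|class)\s').search, on_def),
        ('DEF', 'BODY', lambda line: True, lambda line: None),
    ])
    fsm.feed('def general_work(self, input_items, output_items):')
    # -> saw a definition: def general_work(self, input_items, output_items):

Doxypy layers its FILEHEAD/DEFCLASS/COMMENT_* states on exactly this loop, using the regular expressions compiled in Doxypy.__init__().
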
diff --git a/gr-aistx/docs/doxygen/other/group_defs.dox b/gr-aistx/docs/doxygen/other/group_defs.dox
index 45e2026..97962bd 100644
--- a/gr-aistx/docs/doxygen/other/group_defs.dox
+++ b/gr-aistx/docs/doxygen/other/group_defs.dox
@@ -4,4 +4,3 @@
* module are listed here or in the subcategories below.
*
*/
-
diff --git a/gr-aistx/docs/doxygen/pydoc_macros.h b/gr-aistx/docs/doxygen/pydoc_macros.h
new file mode 100644
index 0000000..fb3954b
--- /dev/null
+++ b/gr-aistx/docs/doxygen/pydoc_macros.h
@@ -0,0 +1,19 @@
+#ifndef PYDOC_MACROS_H
+#define PYDOC_MACROS_H
+
+#define __EXPAND(x) x
+#define __COUNT(_1, _2, _3, _4, _5, _6, _7, COUNT, ...) COUNT
+#define __VA_SIZE(...) __EXPAND(__COUNT(__VA_ARGS__, 7, 6, 5, 4, 3, 2, 1))
+#define __CAT1(a, b) a##b
+#define __CAT2(a, b) __CAT1(a, b)
+#define __DOC1(n1) __doc_##n1
+#define __DOC2(n1, n2) __doc_##n1##_##n2
+#define __DOC3(n1, n2, n3) __doc_##n1##_##n2##_##n3
+#define __DOC4(n1, n2, n3, n4) __doc_##n1##_##n2##_##n3##_##n4
+#define __DOC5(n1, n2, n3, n4, n5) __doc_##n1##_##n2##_##n3##_##n4##_##n5
+#define __DOC6(n1, n2, n3, n4, n5, n6) __doc_##n1##_##n2##_##n3##_##n4##_##n5##_##n6
+#define __DOC7(n1, n2, n3, n4, n5, n6, n7) \
+ __doc_##n1##_##n2##_##n3##_##n4##_##n5##_##n6##_##n7
+#define DOC(...) __EXPAND(__EXPAND(__CAT2(__DOC, __VA_SIZE(__VA_ARGS__)))(__VA_ARGS__))
+
+#endif // PYDOC_MACROS_H
diff --git a/gr-aistx/docs/doxygen/swig_doc.py b/gr-aistx/docs/doxygen/swig_doc.py
deleted file mode 100644
index 4e1ce2e..0000000
--- a/gr-aistx/docs/doxygen/swig_doc.py
+++ /dev/null
@@ -1,255 +0,0 @@
-#
-# Copyright 2010,2011 Free Software Foundation, Inc.
-#
-# This file is part of GNU Radio
-#
-# GNU Radio is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 3, or (at your option)
-# any later version.
-#
-# GNU Radio is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with GNU Radio; see the file COPYING. If not, write to
-# the Free Software Foundation, Inc., 51 Franklin Street,
-# Boston, MA 02110-1301, USA.
-#
-"""
-Creates the swig_doc.i SWIG interface file.
-Execute using: python swig_doc.py xml_path outputfilename
-
-The file instructs SWIG to transfer the doxygen comments into the
-python docstrings.
-
-"""
-
-import sys
-
-try:
- from doxyxml import DoxyIndex, DoxyClass, DoxyFriend, DoxyFunction, DoxyFile, base
-except ImportError:
- from gnuradio.doxyxml import DoxyIndex, DoxyClass, DoxyFriend, DoxyFunction, DoxyFile, base
-
-
-def py_name(name):
- bits = name.split('_')
- return '_'.join(bits[1:])
-
-def make_name(name):
- bits = name.split('_')
- return bits[0] + '_make_' + '_'.join(bits[1:])
-
-
-class Block(object):
- """
- Checks if doxyxml produced objects correspond to a gnuradio block.
- """
-
- @classmethod
- def includes(cls, item):
- if not isinstance(item, DoxyClass):
- return False
- # Check for a parsing error.
- if item.error():
- return False
- return item.has_member(make_name(item.name()), DoxyFriend)
-
-
-def utoascii(text):
- """
- Convert unicode text into ascii and escape quotes.
- """
- if text is None:
- return ''
- out = text.encode('ascii', 'replace')
- out = out.replace('"', '\\"')
- return out
-
-
-def combine_descriptions(obj):
- """
- Combines the brief and detailed descriptions of an object together.
- """
- description = []
- bd = obj.brief_description.strip()
- dd = obj.detailed_description.strip()
- if bd:
- description.append(bd)
- if dd:
- description.append(dd)
- return utoascii('\n\n'.join(description)).strip()
-
-
-entry_templ = '%feature("docstring") {name} "{docstring}"'
-def make_entry(obj, name=None, templ="{description}", description=None):
- """
- Create a docstring entry for a swig interface file.
-
- obj - a doxyxml object from which documentation will be extracted.
- name - the name of the C object (defaults to obj.name())
- templ - an optional template for the docstring containing only one
- variable named 'description'.
- description - if this optional variable is set then it's value is
- used as the description instead of extracting it from obj.
- """
- if name is None:
- name=obj.name()
- if "operator " in name:
- return ''
- if description is None:
- description = combine_descriptions(obj)
- docstring = templ.format(description=description)
- if not docstring:
- return ''
- return entry_templ.format(
- name=name,
- docstring=docstring,
- )
-
-
-def make_func_entry(func, name=None, description=None, params=None):
- """
- Create a function docstring entry for a swig interface file.
-
- func - a doxyxml object from which documentation will be extracted.
- name - the name of the C object (defaults to func.name())
- description - if this optional variable is set then it's value is
- used as the description instead of extracting it from func.
- params - a parameter list that overrides using func.params.
- """
- if params is None:
- params = func.params
- params = [prm.declname for prm in params]
- if params:
- sig = "Params: (%s)" % ", ".join(params)
- else:
- sig = "Params: (NONE)"
- templ = "{description}\n\n" + sig
- return make_entry(func, name=name, templ=utoascii(templ),
- description=description)
-
-
-def make_class_entry(klass, description=None):
- """
- Create a class docstring for a swig interface file.
- """
- output = []
- output.append(make_entry(klass, description=description))
- for func in klass.in_category(DoxyFunction):
- name = klass.name() + '::' + func.name()
- output.append(make_func_entry(func, name=name))
- return "\n\n".join(output)
-
-
-def make_block_entry(di, block):
- """
- Create class and function docstrings of a gnuradio block for a
- swig interface file.
- """
- descriptions = []
- # Get the documentation associated with the class.
- class_desc = combine_descriptions(block)
- if class_desc:
- descriptions.append(class_desc)
- # Get the documentation associated with the make function
- make_func = di.get_member(make_name(block.name()), DoxyFunction)
- make_func_desc = combine_descriptions(make_func)
- if make_func_desc:
- descriptions.append(make_func_desc)
- # Get the documentation associated with the file
- try:
- block_file = di.get_member(block.name() + ".h", DoxyFile)
- file_desc = combine_descriptions(block_file)
- if file_desc:
- descriptions.append(file_desc)
- except base.Base.NoSuchMember:
- # Don't worry if we can't find a matching file.
- pass
- # And join them all together to make a super duper description.
- super_description = "\n\n".join(descriptions)
- # Associate the combined description with the class and
- # the make function.
- output = []
- output.append(make_class_entry(block, description=super_description))
- creator = block.get_member(block.name(), DoxyFunction)
- output.append(make_func_entry(make_func, description=super_description,
- params=creator.params))
- return "\n\n".join(output)
-
-
-def make_swig_interface_file(di, swigdocfilename, custom_output=None):
-
- output = ["""
-/*
- * This file was automatically generated using swig_doc.py.
- *
- * Any changes to it will be lost next time it is regenerated.
- */
-"""]
-
- if custom_output is not None:
- output.append(custom_output)
-
- # Create docstrings for the blocks.
- blocks = di.in_category(Block)
- make_funcs = set([])
- for block in blocks:
- try:
- make_func = di.get_member(make_name(block.name()), DoxyFunction)
- make_funcs.add(make_func.name())
- output.append(make_block_entry(di, block))
- except block.ParsingError:
- print('Parsing error for block %s' % block.name())
-
- # Create docstrings for functions
- # Don't include the make functions since they have already been dealt with.
- funcs = [f for f in di.in_category(DoxyFunction) if f.name() not in make_funcs]
- for f in funcs:
- try:
- output.append(make_func_entry(f))
- except f.ParsingError:
- print('Parsing error for function %s' % f.name())
-
- # Create docstrings for classes
- block_names = [block.name() for block in blocks]
- klasses = [k for k in di.in_category(DoxyClass) if k.name() not in block_names]
- for k in klasses:
- try:
- output.append(make_class_entry(k))
- except k.ParsingError:
- print('Parsing error for class %s' % k.name())
-
- # Docstrings are not created for anything that is not a function or a class.
- # If this excludes anything important please add it here.
-
- output = "\n\n".join(output)
-
- swig_doc = file(swigdocfilename, 'w')
- swig_doc.write(output)
- swig_doc.close()
-
-if __name__ == "__main__":
- # Parse command line options and set up doxyxml.
- err_msg = "Execute using: python swig_doc.py xml_path outputfilename"
- if len(sys.argv) != 3:
- raise StandardError(err_msg)
- xml_path = sys.argv[1]
- swigdocfilename = sys.argv[2]
- di = DoxyIndex(xml_path)
-
- # gnuradio.gr.msq_queue.insert_tail and delete_head create errors unless docstrings are defined!
- # This is presumably a bug in SWIG.
- #msg_q = di.get_member(u'gr_msg_queue', DoxyClass)
- #insert_tail = msg_q.get_member(u'insert_tail', DoxyFunction)
- #delete_head = msg_q.get_member(u'delete_head', DoxyFunction)
- output = []
- #output.append(make_func_entry(insert_tail, name='gr_py_msg_queue__insert_tail'))
- #output.append(make_func_entry(delete_head, name='gr_py_msg_queue__delete_head'))
- custom_output = "\n\n".join(output)
-
- # Generate the docstrings interface file.
- make_swig_interface_file(di, swigdocfilename, custom_output=custom_output)
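
Note: the deleted swig_doc.py emitted one SWIG '%feature("docstring")' directive per documented symbol, while the new update_pydoc.py (below) collects name-to-docstring pairs for the pybind11 bindings instead. A minimal sketch of the old output format, using a hypothetical symbol name and plain string formatting:

    entry_templ = '%feature("docstring") {name} "{docstring}"'

    print(entry_templ.format(
        name='gr::AISTX::Build_Frame',        # hypothetical symbol name
        docstring='Builds an AIS frame from a payload bitstring.'))
    # -> %feature("docstring") gr::AISTX::Build_Frame "Builds an AIS frame from a payload bitstring."
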
diff --git a/gr-aistx/docs/doxygen/update_pydoc.py b/gr-aistx/docs/doxygen/update_pydoc.py
new file mode 100644
index 0000000..b65e168
--- /dev/null
+++ b/gr-aistx/docs/doxygen/update_pydoc.py
@@ -0,0 +1,372 @@
+#
+# Copyright 2010-2012 Free Software Foundation, Inc.
+#
+# This file was generated by gr_modtool, a tool from the GNU Radio framework
+# This file is a part of gnuradio
+#
+# SPDX-License-Identifier: GPL-3.0-or-later
+#
+#
+"""
+Updates the *pydoc_h files for a module
+Execute using: python update_pydoc.py xml_path outputfilename
+
+The file instructs Pybind11 to transfer the doxygen comments into the
+python docstrings.
+
+"""
+
+import os
+import sys
+import time
+import glob
+import re
+import json
+from argparse import ArgumentParser
+
+from doxyxml import DoxyIndex, DoxyClass, DoxyFriend, DoxyFunction, DoxyFile
+from doxyxml import DoxyOther, base
+
+
+def py_name(name):
+ bits = name.split('_')
+ return '_'.join(bits[1:])
+
+
+def make_name(name):
+ bits = name.split('_')
+ return bits[0] + '_make_' + '_'.join(bits[1:])
+
+
+class Block(object):
+ """
+ Checks if doxyxml produced objects correspond to a gnuradio block.
+ """
+
+ @classmethod
+ def includes(cls, item):
+ if not isinstance(item, DoxyClass):
+ return False
+ # Check for a parsing error.
+ if item.error():
+ return False
+ friendname = make_name(item.name())
+ is_a_block = item.has_member(friendname, DoxyFriend)
+ # But now sometimes the make function isn't a friend so check again.
+ if not is_a_block:
+ is_a_block = di.has_member(friendname, DoxyFunction)
+ return is_a_block
+
+
+class Block2(object):
+ """
+ Checks if doxyxml produced objects correspond to a new style
+ gnuradio block.
+ """
+
+ @classmethod
+ def includes(cls, item):
+ if not isinstance(item, DoxyClass):
+ return False
+ # Check for a parsing error.
+ if item.error():
+ return False
+ is_a_block2 = item.has_member(
+ 'make', DoxyFunction) and item.has_member('sptr', DoxyOther)
+ return is_a_block2
+
+
+def utoascii(text):
+ """
+ Convert unicode text into ascii and escape quotes and backslashes.
+ """
+ if text is None:
+ return ''
+ out = text.encode('ascii', 'replace')
+ # swig will require us to replace backslash with 4 backslashes
+ # TODO: evaluate what this should be for pybind11
+ out = out.replace(b'\\', b'\\\\\\\\')
+ out = out.replace(b'"', b'\\"').decode('ascii')
+ return str(out)
+
+
+def combine_descriptions(obj):
+ """
+ Combines the brief and detailed descriptions of an object together.
+ """
+ description = []
+ bd = obj.brief_description.strip()
+ dd = obj.detailed_description.strip()
+ if bd:
+ description.append(bd)
+ if dd:
+ description.append(dd)
+ return utoascii('\n\n'.join(description)).strip()
+
+
+def format_params(parameteritems):
+ output = ['Args:']
+ template = ' {0} : {1}'
+ for pi in parameteritems:
+ output.append(template.format(pi.name, pi.description))
+ return '\n'.join(output)
+
+
+entry_templ = '%feature("docstring") {name} "{docstring}"'
+
+
+def make_entry(obj, name=None, templ="{description}", description=None, params=[]):
+ """
+ Create a docstring key/value pair, where the key is the object name.
+
+ obj - a doxyxml object from which documentation will be extracted.
+ name - the name of the C object (defaults to obj.name())
+ templ - an optional template for the docstring containing only one
+ variable named 'description'.
+ description - if this optional variable is set then it's value is
+ used as the description instead of extracting it from obj.
+ """
+ if name is None:
+ name = obj.name()
+ if hasattr(obj, '_parse_data') and hasattr(obj._parse_data, 'definition'):
+ name = obj._parse_data.definition.split(' ')[-1]
+ if "operator " in name:
+ return ''
+ if description is None:
+ description = combine_descriptions(obj)
+ if params:
+ description += '\n\n'
+ description += utoascii(format_params(params))
+ docstring = templ.format(description=description)
+
+ return {name: docstring}
+
+
+def make_class_entry(klass, description=None, ignored_methods=[], params=None):
+ """
+ Create a class docstring key/value pair.
+ """
+ if params is None:
+ params = klass.params
+ output = {}
+ output.update(make_entry(klass, description=description, params=params))
+ for func in klass.in_category(DoxyFunction):
+ if func.name() not in ignored_methods:
+ name = klass.name() + '::' + func.name()
+ output.update(make_entry(func, name=name))
+ return output
+
+
+def make_block_entry(di, block):
+ """
+ Create class and function docstrings of a gnuradio block
+ """
+ descriptions = []
+ # Get the documentation associated with the class.
+ class_desc = combine_descriptions(block)
+ if class_desc:
+ descriptions.append(class_desc)
+ # Get the documentation associated with the make function
+ make_func = di.get_member(make_name(block.name()), DoxyFunction)
+ make_func_desc = combine_descriptions(make_func)
+ if make_func_desc:
+ descriptions.append(make_func_desc)
+ # Get the documentation associated with the file
+ try:
+ block_file = di.get_member(block.name() + ".h", DoxyFile)
+ file_desc = combine_descriptions(block_file)
+ if file_desc:
+ descriptions.append(file_desc)
+ except base.Base.NoSuchMember:
+ # Don't worry if we can't find a matching file.
+ pass
+ # And join them all together to make a super duper description.
+ super_description = "\n\n".join(descriptions)
+ # Associate the combined description with the class and
+ # the make function.
+ output = {}
+ output.update(make_class_entry(block, description=super_description))
+ output.update(make_entry(make_func, description=super_description,
+ params=block.params))
+ return output
+
+
+def make_block2_entry(di, block):
+ """
+ Create class and function docstrings of a new style gnuradio block
+ """
+ # For new style blocks all the relevant documentation should be
+ # associated with the 'make' method.
+ class_description = combine_descriptions(block)
+ make_func = block.get_member('make', DoxyFunction)
+ make_description = combine_descriptions(make_func)
+ description = class_description + \
+ "\n\nConstructor Specific Documentation:\n\n" + make_description
+ # Associate the combined description with the class and
+ # the make function.
+ output = {}
+ output.update(make_class_entry(
+ block, description=description,
+ ignored_methods=['make'], params=make_func.params))
+ makename = block.name() + '::make'
+ output.update(make_entry(
+ make_func, name=makename, description=description,
+ params=make_func.params))
+ return output
+
+
+def get_docstrings_dict(di, custom_output=None):
+
+ output = {}
+ if custom_output:
+ output.update(custom_output)
+
+ # Create docstrings for the blocks.
+ blocks = di.in_category(Block)
+ blocks2 = di.in_category(Block2)
+
+ make_funcs = set([])
+ for block in blocks:
+ try:
+ make_func = di.get_member(make_name(block.name()), DoxyFunction)
+ # Don't want to risk writing to output twice.
+ if make_func.name() not in make_funcs:
+ make_funcs.add(make_func.name())
+ output.update(make_block_entry(di, block))
+ except block.ParsingError:
+ sys.stderr.write(
+ 'Parsing error for block {0}\n'.format(block.name()))
+ raise
+
+ for block in blocks2:
+ try:
+ make_func = block.get_member('make', DoxyFunction)
+ make_func_name = block.name() + '::make'
+ # Don't want to risk writing to output twice.
+ if make_func_name not in make_funcs:
+ make_funcs.add(make_func_name)
+ output.update(make_block2_entry(di, block))
+ except block.ParsingError:
+ sys.stderr.write(
+ 'Parsing error for block {0}\n'.format(block.name()))
+ raise
+
+ # Create docstrings for functions
+ # Don't include the make functions since they have already been dealt with.
+ funcs = [f for f in di.in_category(DoxyFunction)
+ if f.name() not in make_funcs and not f.name().startswith('std::')]
+ for f in funcs:
+ try:
+ output.update(make_entry(f))
+ except f.ParsingError:
+ sys.stderr.write(
+ 'Parsing error for function {0}\n'.format(f.name()))
+
+ # Create docstrings for classes
+ block_names = [block.name() for block in blocks]
+ block_names += [block.name() for block in blocks2]
+ klasses = [k for k in di.in_category(DoxyClass)
+ if k.name() not in block_names and not k.name().startswith('std::')]
+ for k in klasses:
+ try:
+ output.update(make_class_entry(k))
+ except k.ParsingError:
+ sys.stderr.write('Parsing error for class {0}\n'.format(k.name()))
+
+ # Docstrings are not created for anything that is not a function or a class.
+ # If this excludes anything important please add it here.
+
+ return output
+
+
+def sub_docstring_in_pydoc_h(pydoc_files, docstrings_dict, output_dir, filter_str=None):
+ if filter_str:
+ docstrings_dict = {
+ k: v for k, v in docstrings_dict.items() if k.startswith(filter_str)}
+
+ with open(os.path.join(output_dir, 'docstring_status'), 'w') as status_file:
+
+ for pydoc_file in pydoc_files:
+ if filter_str:
+ filter_str2 = "::".join((filter_str, os.path.split(
+ pydoc_file)[-1].split('_pydoc_template.h')[0]))
+ docstrings_dict2 = {
+ k: v for k, v in docstrings_dict.items() if k.startswith(filter_str2)}
+ else:
+ docstrings_dict2 = docstrings_dict
+
+ file_in = open(pydoc_file, 'r').read()
+ for key, value in docstrings_dict2.items():
+ file_in_tmp = file_in
+ try:
+ doc_key = key.split("::")
+ # if 'gr' in doc_key:
+ # doc_key.remove('gr')
+ doc_key = '_'.join(doc_key)
+ regexp = r'(__doc_{} =\sR\"doc\()[^)]*(\)doc\")'.format(
+ doc_key)
+ regexp = re.compile(regexp, re.MULTILINE)
+
+ (file_in, nsubs) = regexp.subn(
+ r'\1' + value + r'\2', file_in, count=1)
+ if nsubs == 1:
+ status_file.write("PASS: " + pydoc_file + "\n")
+ except KeyboardInterrupt:
+ raise KeyboardInterrupt
+ except: # be permissive, TODO log, but just leave the docstring blank
+ status_file.write("FAIL: " + pydoc_file + "\n")
+ file_in = file_in_tmp
+
+ output_pathname = os.path.join(output_dir, os.path.basename(
+ pydoc_file).replace('_template.h', '.h'))
+ with open(output_pathname, 'w') as file_out:
+ file_out.write(file_in)
+
+
+def copy_docstring_templates(pydoc_files, output_dir):
+ with open(os.path.join(output_dir, 'docstring_status'), 'w') as status_file:
+ for pydoc_file in pydoc_files:
+ file_in = open(pydoc_file, 'r').read()
+ output_pathname = os.path.join(output_dir, os.path.basename(
+ pydoc_file).replace('_template.h', '.h'))
+ with open(output_pathname, 'w') as file_out:
+ file_out.write(file_in)
+ status_file.write("DONE")
+
+
+def argParse():
+ """Parses commandline args."""
+ desc = 'Scrape the doxygen generated xml for docstrings to insert into python bindings'
+ parser = ArgumentParser(description=desc)
+
+ parser.add_argument("function", help="Operation to perform on docstrings", choices=[
+ "scrape", "sub", "copy"])
+
+ parser.add_argument("--xml_path")
+ parser.add_argument("--bindings_dir")
+ parser.add_argument("--output_dir")
+ parser.add_argument("--json_path")
+ parser.add_argument("--filter", default=None)
+
+ return parser.parse_args()
+
+
+if __name__ == "__main__":
+ # Parse command line options and set up doxyxml.
+ args = argParse()
+ if args.function.lower() == 'scrape':
+ di = DoxyIndex(args.xml_path)
+ docstrings_dict = get_docstrings_dict(di)
+ with open(args.json_path, 'w') as fp:
+ json.dump(docstrings_dict, fp)
+ elif args.function.lower() == 'sub':
+ with open(args.json_path, 'r') as fp:
+ docstrings_dict = json.load(fp)
+ pydoc_files = glob.glob(os.path.join(
+ args.bindings_dir, '*_pydoc_template.h'))
+ sub_docstring_in_pydoc_h(
+ pydoc_files, docstrings_dict, args.output_dir, args.filter)
+ elif args.function.lower() == 'copy':
+ pydoc_files = glob.glob(os.path.join(
+ args.bindings_dir, '*_pydoc_template.h'))
+ copy_docstring_templates(pydoc_files, args.output_dir)
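
The `sub` mode of update_pydoc.py above splices the scraped docstrings into the empty R"doc()doc" placeholders of each *_pydoc_template.h. A minimal, self-contained sketch of that substitution, reusing the same regular expression with a sample key and a docstring text taken from the Build_Frame header:

import re

# One placeholder line as it appears in a *_pydoc_template.h file.
template = 'static const char* __doc_gr_aistx_Build_Frame_make = R"doc()doc";'
# Scraped docstrings are keyed by qualified C++ name.
docstrings = {"gr::aistx::Build_Frame::make": "Builds an AIS Frame of 256 bytes."}

out = template
for key, value in docstrings.items():
    doc_key = "_".join(key.split("::"))  # -> gr_aistx_Build_Frame_make
    pattern = re.compile(r'(__doc_{} =\sR\"doc\()[^)]*(\)doc\")'.format(doc_key))
    out, _ = pattern.subn(r"\1" + value + r"\2", out, count=1)

print(out)
# static const char* __doc_gr_aistx_Build_Frame_make = R"doc(Builds an AIS Frame of 256 bytes.)doc";
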
diff --git a/gr-aistx/examples/README b/gr-aistx/examples/README
new file mode 100644
index 0000000..d7a3359
--- /dev/null
+++ b/gr-aistx/examples/README
@@ -0,0 +1,3 @@
+It is considered good practice to add examples in here to demonstrate the
+functionality of your OOT module. Python scripts, GRC flow graphs or other
+code can go here.
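
A hypothetical script of that kind, sketching how the re-namespaced gnuradio.aistx bindings could drive a small transmit chain into a file. The payload, sample rate and file name are placeholders (the payload is not a valid AIS sentence), and the script itself is not part of this change set:

#!/usr/bin/env python3
from gnuradio import gr, blocks, digital
from gnuradio import aistx

SAMP_RATE = 307200  # assumed rate: 32 samples per 9600 bit/s symbol
BIT_RATE = 9600


class ais_tx_example(gr.top_block):
    def __init__(self):
        gr.top_block.__init__(self, "AIS TX example")
        payload = '01' * 84  # placeholder bit string
        frame = aistx.Build_Frame(payload, False, True)  # sentence, repeat, enable_NRZI
        mod = digital.gmsk_mod(samples_per_symbol=int(SAMP_RATE / BIT_RATE), bt=0.4)
        sink = blocks.file_sink(gr.sizeof_gr_complex, 'ais_frame.iq')
        self.connect(frame, mod, sink)


if __name__ == '__main__':
    ais_tx_example().run()
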
diff --git a/gr-aistx/grc/AISTX_Build_Frame.block.yml b/gr-aistx/grc/AISTX_Build_Frame.block.yml
new file mode 100644
index 0000000..cb121d6
--- /dev/null
+++ b/gr-aistx/grc/AISTX_Build_Frame.block.yml
@@ -0,0 +1,33 @@
+# auto-generated by grc.converter
+
+id: AISTX_Build_Frame
+label: AIS Frame Builder
+category: '[AISTX]'
+
+parameters:
+- id: sentence
+ label: Sentence
+ dtype: string
+ default: '010010000011101011110111001110011000100000000000000000100000001011001000001011000101000110100010010100001101011001111011000011111111111011100101110011100000000000000110'
+- id: repeat
+ label: Repeat
+ dtype: enum
+ default: 'True'
+ options: ['True', 'False']
+ option_labels: ['Yes', 'No']
+- id: enable_NRZI
+ label: Enable_NRZI_Conversion
+ dtype: enum
+ default: 'True'
+ options: ['True', 'False']
+ option_labels: ['Yes', 'No']
+
+outputs:
+- domain: stream
+ dtype: byte
+
+templates:
+ imports: import gnuradio.aistx as aistx
+ make: aistx.Build_Frame(${sentence}, ${repeat}, ${enable_NRZI})
+
+file_format: 1
diff --git a/gr-aistx/grc/AISTX_Build_Frame.xml b/gr-aistx/grc/AISTX_Build_Frame.xml
deleted file mode 100644
index 61f7198..0000000
--- a/gr-aistx/grc/AISTX_Build_Frame.xml
+++ /dev/null
@@ -1,47 +0,0 @@
-<?xml version="1.0"?>
-<block>
-  <name>AIS Frame Builder</name>
-  <key>AISTX_Build_Frame</key>
-  <category>AISTX</category>
-  <import>import AISTX</import>
-  <make>AISTX.Build_Frame($sentence, $repeat, $enable_NRZI)</make>
-  <param>
-    <name>Sentence</name>
-    <key>sentence</key>
-    <value>010010000011101011110111001110011000100000000000000000100000001011001000001011000101000110100010010100001101011001111011000011111111111011100101110011100000000000000110</value>
-    <type>string</type>
-  </param>
-  <param>
-    <name>Repeat</name>
-    <key>repeat</key>
-    <value>True</value>
-    <type>enum</type>
-    <option>
-      <name>Yes</name>
-      <key>True</key>
-    </option>
-    <option>
-      <name>No</name>
-      <key>False</key>
-    </option>
-  </param>
-  <param>
-    <name>Enable_NRZI_Conversion</name>
-    <key>enable_NRZI</key>
-    <value>True</value>
-    <type>enum</type>
-    <option>
-      <name>Yes</name>
-      <key>True</key>
-    </option>
-    <option>
-      <name>No</name>
-      <key>False</key>
-    </option>
-  </param>
-  <source>
-    <name>out</name>
-    <type>byte</type>
-  </source>
-</block>
diff --git a/gr-aistx/grc/AISTX_DebugME.block.yml b/gr-aistx/grc/AISTX_DebugME.block.yml
new file mode 100644
index 0000000..b867d14
--- /dev/null
+++ b/gr-aistx/grc/AISTX_DebugME.block.yml
@@ -0,0 +1,24 @@
+# auto-generated by grc.converter
+
+id: AISTX_DebugME
+label: DebugME
+category: '[AISTX]'
+
+parameters:
+- id: type
+ label: Input Type
+ dtype: enum
+ options: [complex, float, byte]
+ option_attributes:
+ size: [gr.sizeof_gr_complex, gr.sizeof_float, gr.sizeof_char]
+ hide: part
+
+inputs:
+- domain: stream
+ dtype: ${ type }
+
+templates:
+ imports: import gnuradio.aistx as aistx
+ make: aistx.DebugME(${type.size})
+
+file_format: 1
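
The ${type.size} template above expands to the size attribute of whichever option is selected, so for the complex choice the generated flow graph code would contain roughly the following (sketch; the other options map to gr.sizeof_float and gr.sizeof_char):

import gnuradio.aistx as aistx
from gnuradio import gr

debug = aistx.DebugME(gr.sizeof_gr_complex)
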
diff --git a/gr-aistx/grc/AISTX_DebugME.xml b/gr-aistx/grc/AISTX_DebugME.xml
deleted file mode 100644
index 9dcdc31..0000000
--- a/gr-aistx/grc/AISTX_DebugME.xml
+++ /dev/null
@@ -1,32 +0,0 @@
-<?xml version="1.0"?>
-<block>
-  <name>DebugME</name>
-  <key>AISTX_DebugME</key>
-  <category>AISTX</category>
-  <import>import AISTX</import>
-  <make>AISTX.DebugME($type.size)</make>
-  <param>
-    <name>Input Type</name>
-    <key>type</key>
-    <type>enum</type>
-    <option>
-      <name>complex</name>
-      <key>complex</key>
-      <opt>size:gr.sizeof_gr_complex</opt>
-    </option>
-    <option>
-      <name>float</name>
-      <key>float</key>
-      <opt>size:gr.sizeof_float</opt>
-    </option>
-    <option>
-      <name>byte</name>
-      <key>byte</key>
-      <opt>size:gr.sizeof_char</opt>
-    </option>
-  </param>
-  <sink>
-    <name>in</name>
-    <type>$type</type>
-  </sink>
-</block>
diff --git a/gr-aistx/grc/AISTX_nrz_to_nrzi.block.yml b/gr-aistx/grc/AISTX_nrz_to_nrzi.block.yml
new file mode 100644
index 0000000..17b314a
--- /dev/null
+++ b/gr-aistx/grc/AISTX_nrz_to_nrzi.block.yml
@@ -0,0 +1,19 @@
+# auto-generated by grc.converter
+
+id: AISTX_nrz_to_nrzi
+label: Nrz to nrzi
+category: '[AISTX]'
+
+inputs:
+- domain: stream
+ dtype: byte
+
+outputs:
+- domain: stream
+ dtype: byte
+
+templates:
+ imports: import gnuradio.aistx as aistx
+ make: aistx.nrz_to_nrzi()
+
+file_format: 1
diff --git a/gr-aistx/grc/AISTX_nrz_to_nrzi.xml b/gr-aistx/grc/AISTX_nrz_to_nrzi.xml
deleted file mode 100644
index d2191bb..0000000
--- a/gr-aistx/grc/AISTX_nrz_to_nrzi.xml
+++ /dev/null
@@ -1,15 +0,0 @@
-<block>
-  <name>Nrz to nrzi</name>
-  <key>AISTX_nrz_to_nrzi</key>
-  <category>AISTX</category>
-  <import>import AISTX</import>
-  <make>AISTX.nrz_to_nrzi()</make>
-  <sink>
-    <name>in</name>
-    <type>byte</type>
-  </sink>
-  <source>
-    <name>out</name>
-    <type>byte</type>
-  </source>
-</block>
diff --git a/gr-aistx/grc/CMakeLists.txt b/gr-aistx/grc/CMakeLists.txt
index 2ae27c1..3838c08 100644
--- a/gr-aistx/grc/CMakeLists.txt
+++ b/gr-aistx/grc/CMakeLists.txt
@@ -1,23 +1,14 @@
# Copyright 2011 Free Software Foundation, Inc.
#
-# This file is part of GNU Radio
+# This file was generated by gr_modtool, a tool from the GNU Radio framework
+# This file is a part of gr-aistx
#
-# GNU Radio is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 3, or (at your option)
-# any later version.
+# SPDX-License-Identifier: GPL-3.0-or-later
#
-# GNU Radio is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with GNU Radio; see the file COPYING. If not, write to
-# the Free Software Foundation, Inc., 51 Franklin Street,
-# Boston, MA 02110-1301, USA.
+
install(FILES
- AISTX_nrz_to_nrzi.xml
- AISTX_Build_Frame.xml
- AISTX_DebugME.xml DESTINATION share/gnuradio/grc/blocks
+ AISTX_Build_Frame.block.yml
+ AISTX_DebugME.block.yml
+ AISTX_nrz_to_nrzi.block.yml
+ DESTINATION share/gnuradio/grc/blocks
)
diff --git a/gr-aistx/include/AISTX/CMakeLists.txt b/gr-aistx/include/AISTX/CMakeLists.txt
deleted file mode 100644
index 257680e..0000000
--- a/gr-aistx/include/AISTX/CMakeLists.txt
+++ /dev/null
@@ -1,28 +0,0 @@
-# Copyright 2011,2012 Free Software Foundation, Inc.
-#
-# This file is part of GNU Radio
-#
-# GNU Radio is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 3, or (at your option)
-# any later version.
-#
-# GNU Radio is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with GNU Radio; see the file COPYING. If not, write to
-# the Free Software Foundation, Inc., 51 Franklin Street,
-# Boston, MA 02110-1301, USA.
-
-########################################################################
-# Install public header files
-########################################################################
-install(FILES
- api.h
- nrz_to_nrzi.h
- Build_Frame.h
- DebugME.h DESTINATION include/AISTX
-)
diff --git a/gr-aistx/include/AISTX/api.h b/gr-aistx/include/AISTX/api.h
deleted file mode 100644
index 40308fd..0000000
--- a/gr-aistx/include/AISTX/api.h
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * Copyright 2011 Free Software Foundation, Inc.
- *
- * This file is part of GNU Radio
- *
- * GNU Radio is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 3, or (at your option)
- * any later version.
- *
- * GNU Radio is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with GNU Radio; see the file COPYING. If not, write to
- * the Free Software Foundation, Inc., 51 Franklin Street,
- * Boston, MA 02110-1301, USA.
- */
-
-#ifndef INCLUDED_AISTX_API_H
-#define INCLUDED_AISTX_API_H
-
-#include <gnuradio/attributes.h>
-
-#ifdef gnuradio_AISTX_EXPORTS
-# define AISTX_API __GR_ATTR_EXPORT
-#else
-# define AISTX_API __GR_ATTR_IMPORT
-#endif
-
-#endif /* INCLUDED_AISTX_API_H */
diff --git a/gr-aistx/include/AISTX/Build_Frame.h b/gr-aistx/include/gnuradio/aistx/Build_Frame.h
similarity index 94%
rename from gr-aistx/include/AISTX/Build_Frame.h
rename to gr-aistx/include/gnuradio/aistx/Build_Frame.h
index 003a562..4d11eb7 100644
--- a/gr-aistx/include/AISTX/Build_Frame.h
+++ b/gr-aistx/include/gnuradio/aistx/Build_Frame.h
@@ -22,11 +22,11 @@
#ifndef INCLUDED_AISTX_BUILD_FRAME_H
#define INCLUDED_AISTX_BUILD_FRAME_H
-#include <AISTX/api.h>
+#include <gnuradio/aistx/api.h>
#include <gnuradio/sync_block.h>
namespace gr {
- namespace AISTX {
+ namespace aistx {
/*!
* \brief Builds AIS Frame
@@ -36,7 +36,7 @@ namespace gr {
class AISTX_API Build_Frame : virtual public gr::sync_block
{
public:
- typedef boost::shared_ptr<Build_Frame> sptr;
+ typedef std::shared_ptr<Build_Frame> sptr;
/*!
* \brief Builds an AIS Frame of 256 bytes.
diff --git a/gr-aistx/include/gnuradio/aistx/CMakeLists.txt b/gr-aistx/include/gnuradio/aistx/CMakeLists.txt
new file mode 100644
index 0000000..9c230f5
--- /dev/null
+++ b/gr-aistx/include/gnuradio/aistx/CMakeLists.txt
@@ -0,0 +1,18 @@
+# Copyright 2011,2012 Free Software Foundation, Inc.
+#
+# This file was generated by gr_modtool, a tool from the GNU Radio framework
+# This file is a part of gr-aistx
+#
+# SPDX-License-Identifier: GPL-3.0-or-later
+#
+
+########################################################################
+# Install public header files
+########################################################################
+install(FILES
+ api.h
+ Build_Frame.h
+ DebugME.h
+ nrz_to_nrzi.h
+ DESTINATION include/gnuradio/aistx
+)
diff --git a/gr-aistx/include/AISTX/DebugME.h b/gr-aistx/include/gnuradio/aistx/DebugME.h
similarity index 93%
rename from gr-aistx/include/AISTX/DebugME.h
rename to gr-aistx/include/gnuradio/aistx/DebugME.h
index 310c57d..9f76b10 100644
--- a/gr-aistx/include/AISTX/DebugME.h
+++ b/gr-aistx/include/gnuradio/aistx/DebugME.h
@@ -22,13 +22,13 @@
#ifndef INCLUDED_AISTX_DEBUGME_H
#define INCLUDED_AISTX_DEBUGME_H
-#include <AISTX/api.h>
+#include <gnuradio/aistx/api.h>
#include <gnuradio/block.h>
namespace gr {
- namespace AISTX {
+ namespace aistx {
/*!
* \brief Print the incoming BYTE sequence
@@ -38,7 +38,7 @@ namespace gr {
class AISTX_API DebugME : virtual public gr::block
{
public:
- typedef boost::shared_ptr<DebugME> sptr;
+ typedef std::shared_ptr<DebugME> sptr;
/*!
* \brief Print the incoming BYTE sequence as sequence of HEXs
diff --git a/gr-aistx/include/gnuradio/aistx/api.h b/gr-aistx/include/gnuradio/aistx/api.h
new file mode 100644
index 0000000..6d19667
--- /dev/null
+++ b/gr-aistx/include/gnuradio/aistx/api.h
@@ -0,0 +1,22 @@
+/*
+ * Copyright 2011 Free Software Foundation, Inc.
+ *
+ * This file was generated by gr_modtool, a tool from the GNU Radio framework
+ * This file is a part of gr-aistx
+ *
+ * SPDX-License-Identifier: GPL-3.0-or-later
+ *
+ */
+
+#ifndef INCLUDED_AISTX_API_H
+#define INCLUDED_AISTX_API_H
+
+#include <gnuradio/attributes.h>
+
+#ifdef gnuradio_aistx_EXPORTS
+#define AISTX_API __GR_ATTR_EXPORT
+#else
+#define AISTX_API __GR_ATTR_IMPORT
+#endif
+
+#endif /* INCLUDED_AISTX_API_H */
diff --git a/gr-aistx/include/AISTX/nrz_to_nrzi.h b/gr-aistx/include/gnuradio/aistx/nrz_to_nrzi.h
similarity index 93%
rename from gr-aistx/include/AISTX/nrz_to_nrzi.h
rename to gr-aistx/include/gnuradio/aistx/nrz_to_nrzi.h
index 571fc26..762e7d6 100644
--- a/gr-aistx/include/AISTX/nrz_to_nrzi.h
+++ b/gr-aistx/include/gnuradio/aistx/nrz_to_nrzi.h
@@ -22,11 +22,11 @@
#ifndef INCLUDED_AISTX_NRZ_TO_NRZI_H
#define INCLUDED_AISTX_NRZ_TO_NRZI_H
-#include <AISTX/api.h>
+#include <gnuradio/aistx/api.h>
#include <gnuradio/block.h>
namespace gr {
- namespace AISTX {
+ namespace aistx {
/*!
* \brief Convert from NRZ to NRZI
@@ -36,7 +36,7 @@ namespace gr {
class AISTX_API nrz_to_nrzi : virtual public gr::block
{
public:
- typedef boost::shared_ptr<nrz_to_nrzi> sptr;
+ typedef std::shared_ptr<nrz_to_nrzi> sptr;
/*!
* \brief Convert from NRZ to NRZI
diff --git a/gr-aistx/lib/Build_Frame_impl.cc b/gr-aistx/lib/Build_Frame_impl.cc
index 66d4058..fc8cb74 100644
--- a/gr-aistx/lib/Build_Frame_impl.cc
+++ b/gr-aistx/lib/Build_Frame_impl.cc
@@ -40,7 +40,7 @@
#define DEBUG 0
namespace gr {
- namespace AISTX {
+ namespace aistx {
Build_Frame::sptr
Build_Frame::make(const char *sentence, bool repeat, bool enable_NRZI)
diff --git a/gr-aistx/lib/Build_Frame_impl.h b/gr-aistx/lib/Build_Frame_impl.h
index 6a6648f..df5a739 100644
--- a/gr-aistx/lib/Build_Frame_impl.h
+++ b/gr-aistx/lib/Build_Frame_impl.h
@@ -21,12 +21,12 @@
#ifndef INCLUDED_AISTX_BUILD_FRAME_IMPL_H
#define INCLUDED_AISTX_BUILD_FRAME_IMPL_H
-#include <AISTX/Build_Frame.h>
+#include <gnuradio/aistx/Build_Frame.h>
#define __VERSION 0.3
namespace gr {
- namespace AISTX {
+ namespace aistx {
class Build_Frame_impl : public Build_Frame
{
diff --git a/gr-aistx/lib/CMakeLists.txt b/gr-aistx/lib/CMakeLists.txt
index 64a6e1c..8e5c475 100644
--- a/gr-aistx/lib/CMakeLists.txt
+++ b/gr-aistx/lib/CMakeLists.txt
@@ -1,67 +1,74 @@
-# Copyright 2011,2012 Free Software Foundation, Inc.
+# Copyright 2011,2012,2016,2018,2019 Free Software Foundation, Inc.
#
-# This file is part of GNU Radio
+# This file was generated by gr_modtool, a tool from the GNU Radio framework
+# This file is a part of gr-aistx
#
-# GNU Radio is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 3, or (at your option)
-# any later version.
+# SPDX-License-Identifier: GPL-3.0-or-later
#
-# GNU Radio is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with GNU Radio; see the file COPYING. If not, write to
-# the Free Software Foundation, Inc., 51 Franklin Street,
-# Boston, MA 02110-1301, USA.
########################################################################
# Setup library
########################################################################
include(GrPlatform) #define LIB_SUFFIX
-include_directories(${Boost_INCLUDE_DIR})
-link_directories(${Boost_LIBRARY_DIRS})
-list(APPEND AISTX_sources
+list(APPEND aistx_sources
nrz_to_nrzi_impl.cc
Build_Frame_impl.cc
- DebugME_impl.cc )
+ DebugME_impl.cc
+)
+
+set(aistx_sources "${aistx_sources}" PARENT_SCOPE)
+if(NOT aistx_sources)
+ MESSAGE(STATUS "No C++ sources... skipping lib/")
+ return()
+endif(NOT aistx_sources)
-add_library(gnuradio-AISTX SHARED ${AISTX_sources})
-target_link_libraries(gnuradio-AISTX ${Boost_LIBRARIES} ${GNURADIO_RUNTIME_LIBRARIES})
-set_target_properties(gnuradio-AISTX PROPERTIES DEFINE_SYMBOL "gnuradio_AISTX_EXPORTS")
+add_library(gnuradio-aistx SHARED ${aistx_sources})
+target_link_libraries(gnuradio-aistx gnuradio::gnuradio-runtime)
+target_include_directories(gnuradio-aistx
+ PUBLIC $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/../include>
+ PUBLIC $<INSTALL_INTERFACE:include>
+ )
+set_target_properties(gnuradio-aistx PROPERTIES DEFINE_SYMBOL "gnuradio_aistx_EXPORTS")
+
+if(APPLE)
+ set_target_properties(gnuradio-aistx PROPERTIES
+ INSTALL_NAME_DIR "${CMAKE_INSTALL_PREFIX}/lib"
+ )
+endif(APPLE)
########################################################################
# Install built library files
########################################################################
-install(TARGETS gnuradio-AISTX
- LIBRARY DESTINATION lib${LIB_SUFFIX} # .so/.dylib file
- ARCHIVE DESTINATION lib${LIB_SUFFIX} # .lib file
- RUNTIME DESTINATION bin # .dll file
-)
+include(GrMiscUtils)
+GR_LIBRARY_FOO(gnuradio-aistx)
+
+########################################################################
+# Print summary
+########################################################################
+message(STATUS "Using install prefix: ${CMAKE_INSTALL_PREFIX}")
+message(STATUS "Building for version: ${VERSION} / ${LIBVER}")
########################################################################
# Build and register unit test
########################################################################
include(GrTest)
-include_directories(${CPPUNIT_INCLUDE_DIRS})
-
-list(APPEND test_AISTX_sources
- ${CMAKE_CURRENT_SOURCE_DIR}/test_AISTX.cc
- ${CMAKE_CURRENT_SOURCE_DIR}/qa_AISTX.cc
+# If your unit tests require special include paths, add them here
+#include_directories()
+# List all files that contain Boost.UTF unit tests here
+list(APPEND test_aistx_sources
)
+# Anything we need to link to for the unit tests go here
+list(APPEND GR_TEST_TARGET_DEPS gnuradio-aistx)
-add_executable(test-AISTX ${test_AISTX_sources})
-
-target_link_libraries(
- test-AISTX
- ${GNURADIO_RUNTIME_LIBRARIES}
- ${Boost_LIBRARIES}
- ${CPPUNIT_LIBRARIES}
- gnuradio-AISTX
-)
+if(NOT test_aistx_sources)
+ MESSAGE(STATUS "No C++ unit tests... skipping")
+ return()
+endif(NOT test_aistx_sources)
-GR_ADD_TEST(test_AISTX test-AISTX)
+foreach(qa_file ${test_aistx_sources})
+ GR_ADD_CPP_TEST("aistx_${qa_file}"
+ ${CMAKE_CURRENT_SOURCE_DIR}/${qa_file}
+ )
+endforeach(qa_file)
diff --git a/gr-aistx/lib/DebugME_impl.cc b/gr-aistx/lib/DebugME_impl.cc
index 3d0681c..6dbaa10 100644
--- a/gr-aistx/lib/DebugME_impl.cc
+++ b/gr-aistx/lib/DebugME_impl.cc
@@ -27,7 +27,7 @@
#include
namespace gr {
- namespace AISTX {
+ namespace aistx {
DebugME::sptr
DebugME::make(size_t itemsize)
diff --git a/gr-aistx/lib/DebugME_impl.h b/gr-aistx/lib/DebugME_impl.h
index ae973cb..2f53bab 100644
--- a/gr-aistx/lib/DebugME_impl.h
+++ b/gr-aistx/lib/DebugME_impl.h
@@ -21,10 +21,10 @@
#ifndef INCLUDED_AISTX_DEBUGME_IMPL_H
#define INCLUDED_AISTX_DEBUGME_IMPL_H
-#include <AISTX/DebugME.h>
+#include <gnuradio/aistx/DebugME.h>
namespace gr {
- namespace AISTX {
+ namespace aistx {
class DebugME_impl : public DebugME
{
diff --git a/gr-aistx/lib/nrz_to_nrzi_impl.cc b/gr-aistx/lib/nrz_to_nrzi_impl.cc
index 93934dc..7e09186 100644
--- a/gr-aistx/lib/nrz_to_nrzi_impl.cc
+++ b/gr-aistx/lib/nrz_to_nrzi_impl.cc
@@ -27,7 +27,7 @@
#include
namespace gr {
- namespace AISTX {
+ namespace aistx {
nrz_to_nrzi::sptr
nrz_to_nrzi::make()
diff --git a/gr-aistx/lib/nrz_to_nrzi_impl.h b/gr-aistx/lib/nrz_to_nrzi_impl.h
index 6484f53..0b6616e 100644
--- a/gr-aistx/lib/nrz_to_nrzi_impl.h
+++ b/gr-aistx/lib/nrz_to_nrzi_impl.h
@@ -21,10 +21,10 @@
#ifndef INCLUDED_AISTX_NRZ_TO_NRZI_IMPL_H
#define INCLUDED_AISTX_NRZ_TO_NRZI_IMPL_H
-#include <AISTX/nrz_to_nrzi.h>
+#include <gnuradio/aistx/nrz_to_nrzi.h>
namespace gr {
- namespace AISTX {
+ namespace aistx {
class nrz_to_nrzi_impl : public nrz_to_nrzi
{
diff --git a/gr-aistx/python/__init__.py b/gr-aistx/python/__init__.py
deleted file mode 100644
index c1d8e3f..0000000
--- a/gr-aistx/python/__init__.py
+++ /dev/null
@@ -1,54 +0,0 @@
-#
-# Copyright 2008,2009 Free Software Foundation, Inc.
-#
-# This application is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 3, or (at your option)
-# any later version.
-#
-# This application is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-#
-
-# The presence of this file turns this directory into a Python package
-
-'''
-This is the GNU Radio AISTX module. Place your Python package
-description here (python/__init__.py).
-'''
-
-# ----------------------------------------------------------------
-# Temporary workaround for ticket:181 (swig+python problem)
-import sys
-_RTLD_GLOBAL = 0
-try:
- from dl import RTLD_GLOBAL as _RTLD_GLOBAL
-except ImportError:
- try:
- from DLFCN import RTLD_GLOBAL as _RTLD_GLOBAL
- except ImportError:
- pass
-
-if _RTLD_GLOBAL != 0:
- _dlopenflags = sys.getdlopenflags()
- sys.setdlopenflags(_dlopenflags|_RTLD_GLOBAL)
-# ----------------------------------------------------------------
-
-
-# import swig generated symbols into the AISTX namespace
-from AISTX_swig import *
-
-# import any pure python here
-#
-
-# ----------------------------------------------------------------
-# Tail of workaround
-if _RTLD_GLOBAL != 0:
- sys.setdlopenflags(_dlopenflags) # Restore original flags
-# ----------------------------------------------------------------
diff --git a/gr-aistx/python/aistx/.gitignore b/gr-aistx/python/aistx/.gitignore
new file mode 100644
index 0000000..85c92e8
--- /dev/null
+++ b/gr-aistx/python/aistx/.gitignore
@@ -0,0 +1,5 @@
+*~
+*.pyc
+*.pyo
+build*/
+examples/grc/*.py
diff --git a/gr-aistx/python/CMakeLists.txt b/gr-aistx/python/aistx/CMakeLists.txt
similarity index 53%
rename from gr-aistx/python/CMakeLists.txt
rename to gr-aistx/python/aistx/CMakeLists.txt
index b79d4bf..7598bae 100644
--- a/gr-aistx/python/CMakeLists.txt
+++ b/gr-aistx/python/aistx/CMakeLists.txt
@@ -1,21 +1,10 @@
# Copyright 2011 Free Software Foundation, Inc.
#
-# This file is part of GNU Radio
+# This file was generated by gr_modtool, a tool from the GNU Radio framework
+# This file is a part of gr-aistx
#
-# GNU Radio is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 3, or (at your option)
-# any later version.
+# SPDX-License-Identifier: GPL-3.0-or-later
#
-# GNU Radio is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with GNU Radio; see the file COPYING. If not, write to
-# the Free Software Foundation, Inc., 51 Franklin Street,
-# Boston, MA 02110-1301, USA.
########################################################################
# Include python install macros
@@ -25,13 +14,15 @@ if(NOT PYTHONINTERP_FOUND)
return()
endif()
+add_subdirectory(bindings)
+
########################################################################
# Install python sources
########################################################################
GR_PYTHON_INSTALL(
FILES
__init__.py
- DESTINATION ${GR_PYTHON_DIR}/AISTX
+ DESTINATION ${GR_PYTHON_DIR}/gnuradio/aistx
)
########################################################################
@@ -39,8 +30,15 @@ GR_PYTHON_INSTALL(
########################################################################
include(GrTest)
-set(GR_TEST_TARGET_DEPS gnuradio-AISTX)
-set(GR_TEST_PYTHON_DIRS ${CMAKE_BINARY_DIR}/swig)
+set(GR_TEST_TARGET_DEPS gnuradio-aistx)
GR_ADD_TEST(qa_nrz_to_nrzi ${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/qa_nrz_to_nrzi.py)
GR_ADD_TEST(qa_Build_Frame ${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/qa_Build_Frame.py)
GR_ADD_TEST(qa_DebugME ${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/qa_DebugME.py)
+
+# Create a package directory that tests can import. It includes everything
+# from `python/`.
+add_custom_target(
+ copy_module_for_tests ALL
+ COMMAND ${CMAKE_COMMAND} -E copy_directory ${CMAKE_CURRENT_SOURCE_DIR}
+ ${PROJECT_BINARY_DIR}/test_modules/gnuradio/aistx/
+)
diff --git a/gr-aistx/python/aistx/__init__.py b/gr-aistx/python/aistx/__init__.py
new file mode 100644
index 0000000..abeb8d0
--- /dev/null
+++ b/gr-aistx/python/aistx/__init__.py
@@ -0,0 +1,23 @@
+#
+# Copyright 2008,2009 Free Software Foundation, Inc.
+#
+# SPDX-License-Identifier: GPL-3.0-or-later
+#
+
+# The presence of this file turns this directory into a Python package
+
+'''
+This is the GNU Radio AISTX module. Place your Python package
+description here (python/__init__.py).
+'''
+import os
+
+# import pybind11 generated symbols into the aistx namespace
+try:
+ # this might fail if the module is python-only
+ from .aistx_python import *
+except ModuleNotFoundError:
+ pass
+
+# import any pure python here
+#
diff --git a/gr-aistx/python/aistx/bindings/Build_Frame_python.cc b/gr-aistx/python/aistx/bindings/Build_Frame_python.cc
new file mode 100644
index 0000000..87532a8
--- /dev/null
+++ b/gr-aistx/python/aistx/bindings/Build_Frame_python.cc
@@ -0,0 +1,50 @@
+/*
+ * Copyright 2022 Free Software Foundation, Inc.
+ *
+ * This file is part of GNU Radio
+ *
+ * SPDX-License-Identifier: GPL-3.0-or-later
+ *
+ */
+
+/***********************************************************************************/
+/* This file is automatically generated using bindtool and can be manually edited */
+/* The following lines can be configured to regenerate this file during cmake */
+/* If manual edits are made, the following tags should be modified accordingly. */
+/* BINDTOOL_GEN_AUTOMATIC(0) */
+/* BINDTOOL_USE_PYGCCXML(0) */
+/* BINDTOOL_HEADER_FILE(Build_Frame.h) */
+/* BINDTOOL_HEADER_FILE_HASH(907e50c6ebda30a530d6617244b764f1) */
+/***********************************************************************************/
+
+#include <pybind11/complex.h>
+#include <pybind11/pybind11.h>
+#include <pybind11/stl.h>
+
+namespace py = pybind11;
+
+#include <gnuradio/aistx/Build_Frame.h>
+// pydoc.h is automatically generated in the build directory
+#include <Build_Frame_pydoc.h>
+
+void bind_Build_Frame(py::module& m)
+{
+
+ using Build_Frame = ::gr::aistx::Build_Frame;
+
+
+ py::class_<Build_Frame, gr::sync_block, gr::block, gr::basic_block, std::shared_ptr<Build_Frame>>(m, "Build_Frame", D(Build_Frame))
+
+ .def(py::init(&Build_Frame::make),
+ py::arg("sentence"),
+ py::arg("repeat"),
+ py::arg("enable_NRZI"),
+ D(Build_Frame, make))
+
+
+ ;
+}
diff --git a/gr-aistx/python/aistx/bindings/CMakeLists.txt b/gr-aistx/python/aistx/bindings/CMakeLists.txt
new file mode 100644
index 0000000..c3917d5
--- /dev/null
+++ b/gr-aistx/python/aistx/bindings/CMakeLists.txt
@@ -0,0 +1,48 @@
+# Copyright 2020 Free Software Foundation, Inc.
+#
+# This file is part of GNU Radio
+#
+# SPDX-License-Identifier: GPL-3.0-or-later
+#
+
+########################################################################
+# Check if there is C++ code at all
+########################################################################
+if(NOT aistx_sources)
+ MESSAGE(STATUS "No C++ sources... skipping python bindings")
+ return()
+endif(NOT aistx_sources)
+
+########################################################################
+# Check for pygccxml
+########################################################################
+GR_PYTHON_CHECK_MODULE_RAW(
+ "pygccxml"
+ "import pygccxml"
+ PYGCCXML_FOUND
+ )
+
+include(GrPybind)
+
+########################################################################
+# Python Bindings
+########################################################################
+
+list(APPEND aistx_python_files
+ Build_Frame_python.cc
+ DebugME_python.cc
+ nrz_to_nrzi_python.cc
+ python_bindings.cc)
+
+GR_PYBIND_MAKE_OOT(aistx
+ ../../..
+ gr::aistx
+ "${aistx_python_files}")
+
+# copy bindings extension for use in QA test module
+add_custom_command(TARGET aistx_python POST_BUILD
+ COMMAND ${CMAKE_COMMAND} -E copy $<TARGET_FILE:aistx_python>
+ ${PROJECT_BINARY_DIR}/test_modules/gnuradio/aistx/
+)
+
+install(TARGETS aistx_python DESTINATION ${GR_PYTHON_DIR}/gnuradio/aistx COMPONENT pythonapi)
diff --git a/gr-aistx/python/aistx/bindings/DebugME_python.cc b/gr-aistx/python/aistx/bindings/DebugME_python.cc
new file mode 100644
index 0000000..570a796
--- /dev/null
+++ b/gr-aistx/python/aistx/bindings/DebugME_python.cc
@@ -0,0 +1,43 @@
+/*
+ * Copyright 2022 Free Software Foundation, Inc.
+ *
+ * This file is part of GNU Radio
+ *
+ * SPDX-License-Identifier: GPL-3.0-or-later
+ *
+ */
+
+/***********************************************************************************/
+/* This file is automatically generated using bindtool and can be manually edited */
+/* The following lines can be configured to regenerate this file during cmake */
+/* If manual edits are made, the following tags should be modified accordingly. */
+/* BINDTOOL_GEN_AUTOMATIC(0) */
+/* BINDTOOL_USE_PYGCCXML(0) */
+/* BINDTOOL_HEADER_FILE(DebugME.h) */
+/* BINDTOOL_HEADER_FILE_HASH(1fffe89f84ead16b08749e8c27960288) */
+/***********************************************************************************/
+
+#include <pybind11/complex.h>
+#include <pybind11/pybind11.h>
+#include <pybind11/stl.h>
+
+namespace py = pybind11;
+
+#include <gnuradio/aistx/DebugME.h>
+// pydoc.h is automatically generated in the build directory
+#include <DebugME_pydoc.h>
+
+void bind_DebugME(py::module& m)
+{
+
+ using DebugME = ::gr::aistx::DebugME;
+
+
+ py::class_<DebugME, gr::block, gr::basic_block, std::shared_ptr<DebugME>>(
+ m, "DebugME", D(DebugME))
+
+ .def(py::init(&DebugME::make), py::arg("itemsize"), D(DebugME, make))
+
+
+ ;
+}
diff --git a/gr-aistx/python/aistx/bindings/README.md b/gr-aistx/python/aistx/bindings/README.md
new file mode 100644
index 0000000..e69de29
diff --git a/gr-aistx/python/aistx/bindings/bind_oot_file.py b/gr-aistx/python/aistx/bindings/bind_oot_file.py
new file mode 100644
index 0000000..543c699
--- /dev/null
+++ b/gr-aistx/python/aistx/bindings/bind_oot_file.py
@@ -0,0 +1,54 @@
+import warnings
+import argparse
+from gnuradio.bindtool import BindingGenerator
+import sys
+import tempfile
+
+parser = argparse.ArgumentParser(description='Bind a GR Out of Tree Block')
+parser.add_argument('--module', type=str,
+ help='Name of gr module containing file to bind (e.g. fft digital analog)')
+
+parser.add_argument('--output_dir', default=tempfile.gettempdir(),
+ help='Output directory of generated bindings')
+parser.add_argument('--prefix', help='Prefix of Installed GNU Radio')
+
+parser.add_argument(
+ '--filename', help="File to be parsed")
+
+parser.add_argument(
+ '--defines', help='Set additional defines for precompiler', default=(), nargs='*')
+parser.add_argument(
+ '--include', help='Additional Include Dirs, separated', default=(), nargs='*')
+
+parser.add_argument(
+ '--status', help='Location of output file for general status (used during cmake)', default=None
+)
+parser.add_argument(
+ '--flag_automatic', default='0'
+)
+parser.add_argument(
+ '--flag_pygccxml', default='0'
+)
+
+args = parser.parse_args()
+
+prefix = args.prefix
+output_dir = args.output_dir
+defines = tuple(','.join(args.defines).split(','))
+includes = ','.join(args.include)
+name = args.module
+
+namespace = ['gr', name]
+prefix_include_root = name
+
+
+with warnings.catch_warnings():
+ warnings.filterwarnings("ignore", category=DeprecationWarning)
+
+ bg = BindingGenerator(prefix, namespace,
+ prefix_include_root, output_dir, define_symbols=defines, addl_includes=includes,
+ catch_exceptions=False, write_json_output=False, status_output=args.status,
+ flag_automatic=True if args.flag_automatic.lower() in [
+ '1', 'true'] else False,
+ flag_pygccxml=True if args.flag_pygccxml.lower() in ['1', 'true'] else False)
+ bg.gen_file_binding(args.filename)
diff --git a/gr-aistx/python/aistx/bindings/docstrings/Build_Frame_pydoc_template.h b/gr-aistx/python/aistx/bindings/docstrings/Build_Frame_pydoc_template.h
new file mode 100644
index 0000000..3406a63
--- /dev/null
+++ b/gr-aistx/python/aistx/bindings/docstrings/Build_Frame_pydoc_template.h
@@ -0,0 +1,24 @@
+/*
+ * Copyright 2022 Free Software Foundation, Inc.
+ *
+ * This file is part of GNU Radio
+ *
+ * SPDX-License-Identifier: GPL-3.0-or-later
+ *
+ */
+#include "pydoc_macros.h"
+#define D(...) DOC(gr, aistx, __VA_ARGS__)
+/*
+ This file contains placeholders for docstrings for the Python bindings.
+ Do not edit! These were automatically extracted during the binding process
+ and will be overwritten during the build process
+ */
+
+
+static const char* __doc_gr_aistx_Build_Frame = R"doc()doc";
+
+
+static const char* __doc_gr_aistx_Build_Frame_Build_Frame = R"doc()doc";
+
+
+static const char* __doc_gr_aistx_Build_Frame_make = R"doc()doc";
diff --git a/gr-aistx/python/aistx/bindings/docstrings/DebugME_pydoc_template.h b/gr-aistx/python/aistx/bindings/docstrings/DebugME_pydoc_template.h
new file mode 100644
index 0000000..aa9decb
--- /dev/null
+++ b/gr-aistx/python/aistx/bindings/docstrings/DebugME_pydoc_template.h
@@ -0,0 +1,24 @@
+/*
+ * Copyright 2022 Free Software Foundation, Inc.
+ *
+ * This file is part of GNU Radio
+ *
+ * SPDX-License-Identifier: GPL-3.0-or-later
+ *
+ */
+#include "pydoc_macros.h"
+#define D(...) DOC(gr, aistx, __VA_ARGS__)
+/*
+ This file contains placeholders for docstrings for the Python bindings.
+ Do not edit! These were automatically extracted during the binding process
+ and will be overwritten during the build process
+ */
+
+
+static const char* __doc_gr_aistx_DebugME = R"doc()doc";
+
+
+static const char* __doc_gr_aistx_DebugME_DebugME = R"doc()doc";
+
+
+static const char* __doc_gr_aistx_DebugME_make = R"doc()doc";
diff --git a/gr-aistx/python/aistx/bindings/docstrings/README.md b/gr-aistx/python/aistx/bindings/docstrings/README.md
new file mode 100644
index 0000000..a506c22
--- /dev/null
+++ b/gr-aistx/python/aistx/bindings/docstrings/README.md
@@ -0,0 +1 @@
+This directory stores templates for docstrings that are scraped from the include header files for each block
diff --git a/gr-aistx/python/aistx/bindings/docstrings/nrz_to_nrzi_pydoc_template.h b/gr-aistx/python/aistx/bindings/docstrings/nrz_to_nrzi_pydoc_template.h
new file mode 100644
index 0000000..04ea7f1
--- /dev/null
+++ b/gr-aistx/python/aistx/bindings/docstrings/nrz_to_nrzi_pydoc_template.h
@@ -0,0 +1,24 @@
+/*
+ * Copyright 2022 Free Software Foundation, Inc.
+ *
+ * This file is part of GNU Radio
+ *
+ * SPDX-License-Identifier: GPL-3.0-or-later
+ *
+ */
+#include "pydoc_macros.h"
+#define D(...) DOC(gr, aistx, __VA_ARGS__)
+/*
+ This file contains placeholders for docstrings for the Python bindings.
+ Do not edit! These were automatically extracted during the binding process
+ and will be overwritten during the build process
+ */
+
+
+static const char* __doc_gr_aistx_nrz_to_nrzi = R"doc()doc";
+
+
+static const char* __doc_gr_aistx_nrz_to_nrzi_nrz_to_nrzi = R"doc()doc";
+
+
+static const char* __doc_gr_aistx_nrz_to_nrzi_make = R"doc()doc";
diff --git a/gr-aistx/python/aistx/bindings/header_utils.py b/gr-aistx/python/aistx/bindings/header_utils.py
new file mode 100644
index 0000000..7c26fe0
--- /dev/null
+++ b/gr-aistx/python/aistx/bindings/header_utils.py
@@ -0,0 +1,80 @@
+# Utilities for reading values in header files
+
+from argparse import ArgumentParser
+import re
+
+
+class PybindHeaderParser:
+ def __init__(self, pathname):
+ with open(pathname, 'r') as f:
+ self.file_txt = f.read()
+
+ def get_flag_automatic(self):
+ # p = re.compile(r'BINDTOOL_GEN_AUTOMATIC\(([^\s])\)')
+ # m = p.search(self.file_txt)
+ m = re.search(r'BINDTOOL_GEN_AUTOMATIC\(([^\s])\)', self.file_txt)
+ if (m and m.group(1) == '1'):
+ return True
+ else:
+ return False
+
+ def get_flag_pygccxml(self):
+ # p = re.compile(r'BINDTOOL_USE_PYGCCXML\(([^\s])\)')
+ # m = p.search(self.file_txt)
+ m = re.search(r'BINDTOOL_USE_PYGCCXML\(([^\s])\)', self.file_txt)
+ if (m and m.group(1) == '1'):
+ return True
+ else:
+ return False
+
+ def get_header_filename(self):
+ # p = re.compile(r'BINDTOOL_HEADER_FILE\(([^\s]*)\)')
+ # m = p.search(self.file_txt)
+ m = re.search(r'BINDTOOL_HEADER_FILE\(([^\s]*)\)', self.file_txt)
+ if (m):
+ return m.group(1)
+ else:
+ return None
+
+ def get_header_file_hash(self):
+ # p = re.compile(r'BINDTOOL_HEADER_FILE_HASH\(([^\s]*)\)')
+ # m = p.search(self.file_txt)
+ m = re.search(r'BINDTOOL_HEADER_FILE_HASH\(([^\s]*)\)', self.file_txt)
+ if (m):
+ return m.group(1)
+ else:
+ return None
+
+ def get_flags(self):
+ return f'{self.get_flag_automatic()};{self.get_flag_pygccxml()};{self.get_header_filename()};{self.get_header_file_hash()};'
+
+
+def argParse():
+ """Parses commandline args."""
+ desc = 'Reads the parameters from the comment block in the pybind files'
+ parser = ArgumentParser(description=desc)
+
+ parser.add_argument("function", help="Operation to perform on comment block of pybind file", choices=[
+ "flag_auto", "flag_pygccxml", "header_filename", "header_file_hash", "all"])
+ parser.add_argument(
+ "pathname", help="Pathname of pybind c++ file to read, e.g. blockname_python.cc")
+
+ return parser.parse_args()
+
+
+if __name__ == "__main__":
+ # Parse command line options and set up doxyxml.
+ args = argParse()
+
+ pbhp = PybindHeaderParser(args.pathname)
+
+ if args.function == "flag_auto":
+ print(pbhp.get_flag_automatic())
+ elif args.function == "flag_pygccxml":
+ print(pbhp.get_flag_pygccxml())
+ elif args.function == "header_filename":
+ print(pbhp.get_header_filename())
+ elif args.function == "header_file_hash":
+ print(pbhp.get_header_file_hash())
+ elif args.function == "all":
+ print(pbhp.get_flags())
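
A short sketch of reading those BINDTOOL_* tags back out of one of the binding files added in this change, assuming it is run from python/aistx/bindings/:

from header_utils import PybindHeaderParser

pbhp = PybindHeaderParser("Build_Frame_python.cc")
print(pbhp.get_flag_automatic())    # False (BINDTOOL_GEN_AUTOMATIC(0))
print(pbhp.get_header_filename())   # Build_Frame.h
print(pbhp.get_header_file_hash())  # 907e50c6ebda30a530d6617244b764f1
print(pbhp.get_flags())             # False;False;Build_Frame.h;907e50c6ebda30a530d6617244b764f1;

The same values are available from the command line, e.g. python3 header_utils.py all Build_Frame_python.cc.
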
diff --git a/gr-aistx/python/aistx/bindings/nrz_to_nrzi_python.cc b/gr-aistx/python/aistx/bindings/nrz_to_nrzi_python.cc
new file mode 100644
index 0000000..e3c862a
--- /dev/null
+++ b/gr-aistx/python/aistx/bindings/nrz_to_nrzi_python.cc
@@ -0,0 +1,43 @@
+/*
+ * Copyright 2022 Free Software Foundation, Inc.
+ *
+ * This file is part of GNU Radio
+ *
+ * SPDX-License-Identifier: GPL-3.0-or-later
+ *
+ */
+
+/***********************************************************************************/
+/* This file is automatically generated using bindtool and can be manually edited */
+/* The following lines can be configured to regenerate this file during cmake */
+/* If manual edits are made, the following tags should be modified accordingly. */
+/* BINDTOOL_GEN_AUTOMATIC(0) */
+/* BINDTOOL_USE_PYGCCXML(0) */
+/* BINDTOOL_HEADER_FILE(nrz_to_nrzi.h) */
+/* BINDTOOL_HEADER_FILE_HASH(4301cfa9ae9ac3df447be8c54adcdf9c) */
+/***********************************************************************************/
+
+#include <pybind11/complex.h>
+#include <pybind11/pybind11.h>
+#include <pybind11/stl.h>
+
+namespace py = pybind11;
+
+#include <gnuradio/aistx/nrz_to_nrzi.h>
+// pydoc.h is automatically generated in the build directory
+#include <nrz_to_nrzi_pydoc.h>
+
+void bind_nrz_to_nrzi(py::module& m)
+{
+
+ using nrz_to_nrzi = ::gr::aistx::nrz_to_nrzi;
+
+
+ py::class_<nrz_to_nrzi, gr::block, gr::basic_block, std::shared_ptr<nrz_to_nrzi>>(
+ m, "nrz_to_nrzi", D(nrz_to_nrzi))
+
+ .def(py::init(&nrz_to_nrzi::make), D(nrz_to_nrzi, make))
+
+
+ ;
+}
diff --git a/gr-aistx/python/aistx/bindings/python_bindings.cc b/gr-aistx/python/aistx/bindings/python_bindings.cc
new file mode 100644
index 0000000..3c98cb8
--- /dev/null
+++ b/gr-aistx/python/aistx/bindings/python_bindings.cc
@@ -0,0 +1,60 @@
+/*
+ * Copyright 2020 Free Software Foundation, Inc.
+ *
+ * This file is part of GNU Radio
+ *
+ * SPDX-License-Identifier: GPL-3.0-or-later
+ *
+ */
+
+#include <pybind11/pybind11.h>
+
+#define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION
+#include <numpy/arrayobject.h>
+
+namespace py = pybind11;
+
+// Headers for binding functions
+/**************************************/
+// The following comment block is used for
+// gr_modtool to insert function prototypes
+// Please do not delete
+/**************************************/
+// BINDING_FUNCTION_PROTOTYPES(
+void bind_Build_Frame(py::module& m);
+void bind_DebugME(py::module& m);
+void bind_nrz_to_nrzi(py::module& m);
+
+// ) END BINDING_FUNCTION_PROTOTYPES
+
+
+// We need this hack because import_array() returns NULL
+// for newer Python versions.
+// This function is also necessary because it ensures access to the C API
+// and removes a warning.
+void* init_numpy()
+{
+ import_array();
+ return NULL;
+}
+
+PYBIND11_MODULE(aistx_python, m)
+{
+ // Initialize the numpy C API
+ // (otherwise we will see segmentation faults)
+ init_numpy();
+
+ // Allow access to base block methods
+ py::module::import("gnuradio.gr");
+
+ /**************************************/
+ // The following comment block is used for
+ // gr_modtool to insert binding function calls
+ // Please do not delete
+ /**************************************/
+ // BINDING_FUNCTION_CALLS(
+ bind_Build_Frame(m);
+ bind_DebugME(m);
+ bind_nrz_to_nrzi(m);
+ // ) END BINDING_FUNCTION_CALLS
+}
diff --git a/gr-aistx/python/qa_Build_Frame.py b/gr-aistx/python/aistx/qa_Build_Frame.py
similarity index 97%
rename from gr-aistx/python/qa_Build_Frame.py
rename to gr-aistx/python/aistx/qa_Build_Frame.py
index 8542990..f88115a 100755
--- a/gr-aistx/python/qa_Build_Frame.py
+++ b/gr-aistx/python/aistx/qa_Build_Frame.py
@@ -19,7 +19,7 @@
#
from gnuradio import gr, gr_unittest
-import AISTX_swig as AISTX
+import aistx_python as aistx
class qa_Build_Frame (gr_unittest.TestCase):
diff --git a/gr-aistx/python/qa_DebugME.py b/gr-aistx/python/aistx/qa_DebugME.py
similarity index 97%
rename from gr-aistx/python/qa_DebugME.py
rename to gr-aistx/python/aistx/qa_DebugME.py
index 1781be1..081e8fa 100755
--- a/gr-aistx/python/qa_DebugME.py
+++ b/gr-aistx/python/aistx/qa_DebugME.py
@@ -19,7 +19,7 @@
#
from gnuradio import gr, gr_unittest
-import AISTX_swig as AISTX
+import aistx_python as aistx
class qa_DebugME (gr_unittest.TestCase):
diff --git a/gr-aistx/python/qa_nrz_to_nrzi.py b/gr-aistx/python/aistx/qa_nrz_to_nrzi.py
similarity index 97%
rename from gr-aistx/python/qa_nrz_to_nrzi.py
rename to gr-aistx/python/aistx/qa_nrz_to_nrzi.py
index 3951919..54a88ea 100755
--- a/gr-aistx/python/qa_nrz_to_nrzi.py
+++ b/gr-aistx/python/aistx/qa_nrz_to_nrzi.py
@@ -19,7 +19,7 @@
#
from gnuradio import gr, gr_unittest
-import AISTX_swig as AISTX
+import aistx_python as aistx
class qa_nrz_to_nrzi (gr_unittest.TestCase):
diff --git a/gr-aistx/swig/AISTX_swig.i b/gr-aistx/swig/AISTX_swig.i
deleted file mode 100644
index e3326b3..0000000
--- a/gr-aistx/swig/AISTX_swig.i
+++ /dev/null
@@ -1,24 +0,0 @@
-/* -*- c++ -*- */
-
-#define AISTX_API
-
-%include "gnuradio.i" // the common stuff
-
-//load generated python docstrings
-%include "AISTX_swig_doc.i"
-
-%{
-#include "AISTX/nrz_to_nrzi.h"
-#include "AISTX/Build_Frame.h"
-#include "AISTX/DebugME.h"
-%}
-
-
-%include "AISTX/nrz_to_nrzi.h"
-GR_SWIG_BLOCK_MAGIC2(AISTX, nrz_to_nrzi);
-
-%include "AISTX/Build_Frame.h"
-GR_SWIG_BLOCK_MAGIC2(AISTX, Build_Frame);
-
-%include "AISTX/DebugME.h"
-GR_SWIG_BLOCK_MAGIC2(AISTX, DebugME);
diff --git a/gr-aistx/swig/CMakeLists.txt b/gr-aistx/swig/CMakeLists.txt
deleted file mode 100644
index 0a80154..0000000
--- a/gr-aistx/swig/CMakeLists.txt
+++ /dev/null
@@ -1,57 +0,0 @@
-# Copyright 2011 Free Software Foundation, Inc.
-#
-# This file is part of GNU Radio
-#
-# GNU Radio is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 3, or (at your option)
-# any later version.
-#
-# GNU Radio is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with GNU Radio; see the file COPYING. If not, write to
-# the Free Software Foundation, Inc., 51 Franklin Street,
-# Boston, MA 02110-1301, USA.
-
-########################################################################
-# Include swig generation macros
-########################################################################
-find_package(SWIG)
-find_package(PythonLibs)
-if(NOT SWIG_FOUND OR NOT PYTHONLIBS_FOUND)
- return()
-endif()
-include(GrSwig)
-include(GrPython)
-
-########################################################################
-# Setup swig generation
-########################################################################
-foreach(incdir ${GNURADIO_RUNTIME_INCLUDE_DIRS})
- list(APPEND GR_SWIG_INCLUDE_DIRS ${incdir}/swig)
-endforeach(incdir)
-
-set(GR_SWIG_LIBRARIES gnuradio-AISTX)
-set(GR_SWIG_DOC_FILE ${CMAKE_CURRENT_BINARY_DIR}/AISTX_swig_doc.i)
-set(GR_SWIG_DOC_DIRS ${CMAKE_CURRENT_SOURCE_DIR}/../include)
-
-GR_SWIG_MAKE(AISTX_swig AISTX_swig.i)
-
-########################################################################
-# Install the build swig module
-########################################################################
-GR_SWIG_INSTALL(TARGETS AISTX_swig DESTINATION ${GR_PYTHON_DIR}/AISTX)
-
-########################################################################
-# Install swig .i files for development
-########################################################################
-install(
- FILES
- AISTX_swig.i
- ${CMAKE_CURRENT_BINARY_DIR}/AISTX_swig_doc.i
- DESTINATION ${GR_INCLUDE_DIR}/AISTX/swig
-)