diff --git a/docs/_images/add_new_mfc_tag.png b/docs/_images/add_new_mfc_tag.png new file mode 100644 index 0000000..6380d91 Binary files /dev/null and b/docs/_images/add_new_mfc_tag.png differ diff --git a/docs/_images/allKeyIds.png b/docs/_images/allKeyIds.png new file mode 100644 index 0000000..ebfe720 Binary files /dev/null and b/docs/_images/allKeyIds.png differ diff --git a/docs/_images/calibration_keys_enum.png b/docs/_images/calibration_keys_enum.png new file mode 100644 index 0000000..6eadc9f Binary files /dev/null and b/docs/_images/calibration_keys_enum.png differ diff --git a/docs/_images/ckv_config.png b/docs/_images/ckv_config.png new file mode 100644 index 0000000..bcc9a89 Binary files /dev/null and b/docs/_images/ckv_config.png differ diff --git a/docs/_images/ckv_subgraph.png b/docs/_images/ckv_subgraph.png new file mode 100644 index 0000000..25d2154 Binary files /dev/null and b/docs/_images/ckv_subgraph.png differ diff --git a/docs/_images/define_key_values.png b/docs/_images/define_key_values.png new file mode 100644 index 0000000..5098c3b Binary files /dev/null and b/docs/_images/define_key_values.png differ diff --git a/docs/_images/define_new_tag.png b/docs/_images/define_new_tag.png new file mode 100644 index 0000000..24758ba Binary files /dev/null and b/docs/_images/define_new_tag.png differ diff --git a/docs/_images/device_pp_subgraph.png b/docs/_images/device_pp_subgraph.png new file mode 100644 index 0000000..0671241 Binary files /dev/null and b/docs/_images/device_pp_subgraph.png differ diff --git a/docs/_images/device_subgraph.png b/docs/_images/device_subgraph.png new file mode 100644 index 0000000..1edcded Binary files /dev/null and b/docs/_images/device_subgraph.png differ diff --git a/docs/_images/graph_keys.png b/docs/_images/graph_keys.png new file mode 100644 index 0000000..187ff02 Binary files /dev/null and b/docs/_images/graph_keys.png differ diff --git a/docs/_images/mfc_assigned_tags.png b/docs/_images/mfc_assigned_tags.png 
new file mode 100644 index 0000000..a4a5234 Binary files /dev/null and b/docs/_images/mfc_assigned_tags.png differ diff --git a/docs/_images/mfc_calibration_window.png b/docs/_images/mfc_calibration_window.png new file mode 100644 index 0000000..e65c8af Binary files /dev/null and b/docs/_images/mfc_calibration_window.png differ diff --git a/docs/_images/mfc_kvh2xml.png b/docs/_images/mfc_kvh2xml.png new file mode 100644 index 0000000..f3dd8d3 Binary files /dev/null and b/docs/_images/mfc_kvh2xml.png differ diff --git a/docs/_images/open_key_configurator.png b/docs/_images/open_key_configurator.png new file mode 100644 index 0000000..0b857d9 Binary files /dev/null and b/docs/_images/open_key_configurator.png differ diff --git a/docs/_images/stream-device_subgraph.png b/docs/_images/stream-device_subgraph.png new file mode 100644 index 0000000..0fcba11 Binary files /dev/null and b/docs/_images/stream-device_subgraph.png differ diff --git a/docs/_images/stream_subgraph.png b/docs/_images/stream_subgraph.png new file mode 100644 index 0000000..cc44c25 Binary files /dev/null and b/docs/_images/stream_subgraph.png differ diff --git a/docs/_images/system_designer_workflow.png b/docs/_images/system_designer_workflow.png new file mode 100644 index 0000000..ee6f0ab Binary files /dev/null and b/docs/_images/system_designer_workflow.png differ diff --git a/docs/_images/tag_key.png b/docs/_images/tag_key.png new file mode 100644 index 0000000..d997831 Binary files /dev/null and b/docs/_images/tag_key.png differ diff --git a/docs/_images/volume_control_ckv.png b/docs/_images/volume_control_ckv.png new file mode 100644 index 0000000..dd43e6f Binary files /dev/null and b/docs/_images/volume_control_ckv.png differ diff --git a/docs/_sources/design/arspf_design.rst.txt b/docs/_sources/design/arspf_design.rst.txt index b6900ba..2b9b65f 100644 --- a/docs/_sources/design/arspf_design.rst.txt +++ b/docs/_sources/design/arspf_design.rst.txt @@ -1168,37 +1168,7 @@ Customizations Custom 
module ------------- - -The custom module development workflow involves the following high-level steps: - -1. Start custom algorithm using standard industrial tool such as Matlab and optimize the algorithm - for intended processor architecture - -2. Develop the Common Audio Processor Interface (CAPI) wrapper for the - custom algorithm. For examples and detailed instructions, see the :ref:`capi_mod_dev_guide` - -3. Develop an API header file consisting of Module ID and configuration - parameters related to the custom algorithm. - -4. Generate an API XML file by running the h2xml conversion tool on the API - header file. The XML file provides the necessary information about configuration - interfaces, supported containers, stack size, and any other policies - that are required for the AudioReach configuration tool (ARC platform). - -5. Compile the CAPI-wrapped module as a built-in module as part of ARE image - or standalone shared object. - -6. Import the custom module into the ARC platform through a module - discovery workflow, and create use case graphs by placing the module - in the appropriate container and subgraphs. - -7. Calibrate or configure the module together with an end-to-end use - case, and store the data in the file system (through the ACDB file - provided by the ARC platform). - -8. Launch the end-to-end use case from the application, which in turn - uses the use case graph and calibration information from the ACDB - file and provides them to the ARE to realize the use case. +For steps on how to add a custom module, please refer to the :ref:`adding_modules` guide. Custom container ---------------- diff --git a/docs/_sources/design/design_concept.rst.txt b/docs/_sources/design/design_concept.rst.txt index 1262f68..efa4093 100644 --- a/docs/_sources/design/design_concept.rst.txt +++ b/docs/_sources/design/design_concept.rst.txt @@ -1,134 +1,254 @@ -AudioReach Concept and Terminology -===================================== +.. 
_design_concept: + +AudioReach Concepts and Terminology +################################### .. contents:: :local: :depth: 2 Introduction ----------------- +============ -Design and implementation of AudioReach software revolves around following core concepts and associated terminology. Before developing audio applications using AudioReach, it is important for developers to comprehend these concepts. +Design and implementation of AudioReach software revolves around following core concepts and associated terminology. Before developing audio applications using AudioReach, it is important for developers to comprehend these concepts: - - Audio graph + - Audio graphs + - Key vectors + - Module identification and configuration - Use case to audio graph mapping - Use case to calibration data mapping - - Module identification and configuration - Data driven through H2XML -Audio Graph ------------------------------ +This document will be referring to the AudioReach Engine (or ARE), which is the signal processing framework for AudioReach. For more information on AudioReach Engine, please refer to the :ref:`arspf_design` page. + +Use Case to Audio Graph Mapping +=============================== + +In AudioReach, use cases are represented as audio graphs containing a series of interconnected audio modules from source end point(s) to sink end point(s). +The following sections will discuss the different components of audio graphs in AudioReach and how to configure various aspects of an audio use case, as well as how these concepts are presented in AudioReach Creator. + +Graphs +------- + +* A **graph** is a logical representation of a group of one or more sub-graphs connected together in-order to realize a specific use-case. Data moves across this graph to realize the use-case. + + * The simplest graph could be one subgraph consisting of a single module. +* **Subgraphs** are a logical abstraction for a group of modules that are connected and manipulated as a single entity. 
They are used to independently control portions of a graph when switching between use cases. -Concept -++++++++ + * The purpose of dividing a use case into subgraphs is to make graph management easier. It is not always necessary to change all modules in a graph when the use case changes or when a device switch happens. Subgraphs enable an update to apply to only a part of the graph. + * Using subgraphs reduces the number of entries required to complete a full device tuning for all use cases. +* **Modules** are independent blocks in the DSP or software that accomplish some aspect of a use case. They are the smallest independent processing unit within the signal processing framework. In AudioReach, every function from endpoint to endpoint is represented as a module. Some examples of modules include hardware sources and sinks, software memory endpoints, and audio processing such as filtering, and data logging and metering. +* **Containers** are a unique concept of AudioReach Engine. They allow a system designer to group and execute audio processing modules together in a single software thread. -Figure below illustrates key constructs and composition of a typical audio playback use case graph consisting of multiple sub-graphs. Inside each sub-graph, there are multiple modules, all assigned with unique IDs (instance id), which can be grouped into one or multiple containers. Please note that container may not be associated with only one sub-graph. +More detailed information about each of the concepts above can be found in the :ref:`arspf_design` page. -Breaking audio graph into sub-graphs enables audio system designer group audio processing modules into higher-level constructs such as stream-leg and device-leg. Then, designer can develop middleware layer to manage audio use case and sink/source endpoint in form of stream and device. 
Endpoint can be hardware base such as I2S and software base such as shared memory for exchanging audio sample between client and AudioReach Engine. During state transition such as switching audio output to different sink endpoint, middleware layer only needs to tear down device-leg sub-graph while retaining stream-leg sub-graph. Then, resume the use case by instantiating another device-leg subgraph for new sink endpoint and reattach to stream-leg subgraph to form new full graph. +The figure below illustrates key constructs and composition of a typical audio playback use case graph consisting of multiple subgraphs. Inside each subgraph, there are multiple modules, all assigned with unique IDs (instance id), which can be grouped into one or multiple containers. Please note that container may not be associated with only one subgraph. .. _example_audiopb_graph: -.. figure:: images/graph_concept.png +.. figure:: images/concept/graph_concept.png :figclass: fig-center :scale: 75 % - + Example Audio Playback Graph -Constructs -++++++++++ +Breaking an audio graph into subgraphs enables the audio system designer to group audio processing modules into higher-level constructs such as stream-leg and device-leg. Then, the designer can develop middleware layer to +manage the audio use case and the sink/source endpoint in the form of stream and device. Endpoints can be a hardware base, such as I2S, or a software base, such as shared memory for exchanging audio sample between client +and AudioReach Engine. During a state transition, such as switching the audio output to a different sink endpoint, the middleware layer only needs to tear down device-leg subgraph while retaining stream-leg subgraph. +Then, the use case can resume by instantiating another device-leg subgraph for new sink endpoint and reattaching it to the stream-leg subgraph to form a new full graph. -This section provides high-level description of audio graph constructs. 
More detail information of each construct can be found in :ref:`arspf_design`. -**Module:** is the smallest independent processing unit within signal -processing framework. +Keys and Values +------------------- +A **key** is an abstract entity containing several values that uniquely identify some aspect of an audio use case. -**Sub-graph**: is a logical abstraction for a group of modules that are -connected and manipulated as a single entity. +A **key vector** (KV) is a general term used to describe a set of key-values. Each KV may +contain one or more key-value pairs. By using different KVs, multiple use cases and +calibrations can be achieved. In AudioReach, there are three kinds of KVs: -**Graph**: is a logical interpretation of a group of one or more -sub-graphs connected together in-order to realize a specific use-case. -Data moves across this graph to realize the use-case. -The simplest graph could be one sub-graph consisting of a single module. +* **Graph key vector (GKV)** – Defines a use case. Key-values are applied to subgraphs within the use case. The graph or system designer associates a set of unique key-values when creating a sub-graph from the ARC UI canvas. +* **Tag Key Vector (TKV)** – Also known as a Module Tag. Applied to individual modules that require parameter control at runtime. Each module tag may contain one or more keys, and these keys may be applied on a per-module basis in the form of a TKV. +* **Calibration key vector (CKV)** – Defines various calibrations within a single use case (for example, sample rate or volume dependency). Key-Values are applied to individual modules. -**Container**: is a unique concept of AudioReach Engine. -It allows system designer to group and execute audio processing modules together -in single software thread +**Key-value pairs** are the individual keys and associated values in the key vector. For example, sound device can be a key and the value can be headphone, speaker, or other sound device. 
+The full list of keys and values enabled in AudioReach can be found in the **Audio Calibration Database** (or ACDB) in the file `kvh2xml.h `_. -Use Case to Audio Graph Mapping --------------------------------- +Graph Key Vector (GKV) +---------------------- +A **GKV**, also known as a use case ID, is a vector of key-value pairs that uniquely identify +a whole graph. The key-value pairs are used to select a unique combination of +subgraphs to realize the full use case. -Concept -++++++++ +GKV example +~~~~~~~~~~~ -Refer to :ref:`arch_overview`. Audio graph definitions are stored in ACDB. During use case setup time, audio graph definition is retrieved from ACDB with use case handle passed by the client which is in form of key vector. In the figure - :ref:`example_audiopb_graph`, GKV is consisted of 3 key-value pairs in this example. What attributes to be used as keys to form key vector depends on platform requirement. For example, sound device can be used as key with potential value such as headphone and speaker. +Below is an example set of GKVs for a Low-Latency playback graph: -Once audio graph definition is retrieved by looking up ACDB with GKV, graph definition is pushed down to ARE by ARGS. +* [StreamRx: PCM_LL_Playback] [DeviceRX: Speaker] [Instance: Instance_1] [DevicePP_Rx: Audio_MBDRC] -Constructs -++++++++++ +The GKVs are managed by the PAL layer (audioreach-pal) at runtime. When a new stream is opened, the requested stream type will be sent to the pal_stream_open +API. These stream types are mapped to the StreamRx key values. So when opening the new stream, PAL will assign the next available +instance of the desired stream type and map the correct instance key and value. The same is true for the Device type as well. For example, if the caller of pal_stream_open specifies the output +device as "Speaker", PAL will map this to the corresponding Speaker GKV. 
-This section provides high-level description of constructs involved in use case to audio graph mapping. More detail information of each construct can be found in :ref:`args_design`. +Note that changing the key value does not automatically change the topology. When +making customizations, care must be taken to ensure that key values match their +respective subgraph topologies. +Together, these four key-values make up the full GKV which is addressed from the +driver-side perspective. +However, from the system designer perspective, these key-values can be assigned +flexibly to subgraphs. Each subgraph has a SGKV that always consists of one or more of +the graph key-values. This allows the system designer to achieve potentially complex +subgraph configurations from a simple set of graph key-values. -**Use Case**: An audio use case is a graph of modules from source end point(s) to sink end point(s) that satisfies the product defined use case. +Use Case to Calibration Data Mapping +==================================== -**Key value pair**: is the individual key and associated values in the key vector. For example, sound device can be a key and value can be headphone, speaker, or other sound device. +Once a graph is loaded on ARE, the next step is to push the corresponding calibration for all the modules in the graph so modules can produce desired acoustic output for intended use cases. +Mapping of calibration data to use case is done by querying ACDB with the calibration handle in the form of key-vector (CKV) as depicted in figure - :ref:`example_audiopb_graph`. -**Key Vector**: Uniquely identify graph or subgraph through a set of key value pairs. +Typically, calibration data being applied is highly dependent on runtime parameters such as sample-rate, bitwidth, channels, gains, and etc. Hence, it is likely system designer would use these run-time parameters as keys. -**Graph Key Vector**: A unique identifier to retrieve the Graph. 
A KV ( key vector) is represented by a set of multiple key value pairs. The graph or system designer associates a set of unique and when creating a sub-graph from the ARC UI canvas. +Calibration Key Vector (CKV) +---------------------------- +A **CKV** is assigned at a per-module level to realize specific calibrations for a use case. +Calibration keys enable multiple calibrations within a single subgraph. Each module in +the subgraph may be dependent on none, some, or all available CKVs depending on the +system designer’s choice. The system designer may also choose to include or exclude +groups of module parameters from CKV dependency. +For example, a use case may require different module tunings for different sample rates. +Using CKVs, the user can specify which modules and module parameters are samplerate dependent or agnostic. This simplifies the total number of calibration entries and +reduces unnecessary copying between calibrations. -Use Case to Calibration Data Mapping -------------------------------------- +When assigning multiple CKVs to a module, the number of calibration entries is n*m, +where n and m are the number of values used for each key. For example, if one key had +4 values, and another had 6 values, the total number of calibration entries would be 24 if +both were assigned to a single module. -Concept -++++++++++++ +CKV example +~~~~~~~~~~~ +Consider the Device subgraph from the Low Latency playback use case (note: this is the Low Latency playback graph that is used for the RB3 Gen2 device). -Once a graph is loaded on ARE, the next step is to push the corresponding calibration for all the modules in the graph so modules can produce desired acoustic output for intended use cases. -Mapping of calibration data to use case is by querying ACDB with calibration handle in form of key-vector (CKV) as depicted in figure - :ref:`example_audiopb_graph`. +.. 
figure:: images/concept/ckv_subgraph.png + :figclass: fig-left + :scale: 85 % -Typically, calibration data being applied is highly dependent on runtime parameters such as sample-rate, bitwidth, channels, gains, and etc. Hence, it is likely system designer would use these run-time parameters as keys. +The CKVs used in this subgraph are visible as drop-down menus in the top bar of the +subgraph. -Constructs -++++++++++++ +By selecting CKVs from the drop-down menus, the system designer or tuning engineer +may recall a specific calibration for the entire subgraph. This can affect multiple modules at a time or only one module. +In this case, setting a volume level will change the calibration for the Volume Control module. +Selecting the volume level will change the gain value for all channels. For example, setting the volume calibration to +"Level_3" will increase the gain value by a specified amount in the Volume Control module, as seen in the below image: -This section provides high-level description of constructs involved in use case to calibration mapping. More detail information of each construct can be found in :ref:`args_design`. +.. figure:: images/concept/volume_control_ckv.png + :figclass: fig-left + :scale: 85 % -**Key value pair**: is the individual key and associated values in the key vector. For example, sample rate can be a key with values such as 8Khz, 16Khz, 48Khz. +CKVs can be viewed or modified by using the Key Configurator. -**Key Vector**: Uniquely identify calibration data through a set of key value pairs. +.. figure:: images/concept/ckv_config.png + :figclass: fig-left + :scale: 85 % -**Calibration Key Vector**: A unique identifier to retrieve the calibration data. A KV (key vector) is represented by a set of multiple key value pairs. The graph or system designer associates a set of unique and when storing calibration from the ARC. +Parameters may be copied from one calibration to another using the batch copy +function. 
Module Identification and Configuration ------------------------------------------ +======================================= -Concept -+++++++++ +A usecase may require enable/disable/configure certain capability at runtime e.g controllable audio effects: equalizer, volume, and echo cancellation. This capability can be supported by one or multiple modules in the audio graph. +Different algorithm developers can develop modules supporting same capability but different configuration parameters. Audio system designer may select one module over the other for his/her product. -A usecase may require enable/disable/configure certain capability at runtime e.g controllable audio effects: equalizer, volume, and echo cancellation. The capability can be supported by one or multiple modules in the audio graph. Different algorithm developers can develop modules supporting same capability but different configuration parameters. Audio system designer may select one module over the other for his/her product. +Since the module implementing this capability may be different across different subgraphs and software does not want to be hard coded to work with fixed set of modules, a mechanism to identify and configure these modules in a generic way is needed. +AudioReach architecture refers generic identification and configuration mechanism as "tagging". -Since the module implementing this capability may be different across different subgraphs and software does not want to be hard coded to work with fixed set of modules, a mechanism to identify and configure these modules in a generic way is needed. AudioReach architecture refers generic identification and configuration mechanism as "tagging". Audio system designer tags the module in the graph with identifier(tag ID) and defines keys to represent different configuration of given module through ARC and save into ACDB. For example, an audio application needs to support turning on/off echo cancellation at run-time. 
Audio system designer defines tag as "echo cancellation" and keys as ON and OFF. Then, tag the EC module in the graph with the tag "echo cancellation" and map key - ON/OFF to module specific parameters and values +Tag Key Vector (TKV) +--------------------- -When the capability needs to enable/disable/configured, software fetches tagged module info and configuration parameters from ACDB by passing tag and key. So software, at run-time, can address and package the configuration to desired module running on ARE in generic fashion. +A module tag, or **TKV**, is an identifier set on a module to identify and set runtime controllable +parameters. -Constructs -++++++++++++ +For example, an audio application may need to support turning echo cancellation on or off during run-time. To do this, the audio system designer can define a tag "echo cancellation", and keys "ON" and "OFF". +Then, this tag-key pair can be used on the EC module in the graph. -**Tag & Tag Key Vector**: Tag is an identifier set on a module to identify/set runtime controllable params of one or more modules. Use case may require updating configuration of one or more modules in a graph to enable/disable/configure certain capability of a module at runtime. e.g. echo cancellation, and equalizer +Then, when the echo cancellation needs to be enabled/disabled/configured, the software fetches the tagged module info and configuration parameters from the Audio Calibration Database by passing the tag and key. +This allows the software to address and package the configuration to desired module running on AudioReach Engine in a generic fashion during run-time. -**Module Parameter Structure**: In AudioReach architecture, commands and events exchanged with ARE are always packaged as depicted in the figure below +In AudioReach architecture, commands and events exchanged with ARE are always packaged as depicted in the figure below. -.. figure:: images/module_structure.png - :figclass: fig-center +.. 
figure:: images/concept/module_structure.png + :figclass: fig-left Module Parameter Structure +This capability can be supported by one or multiple modules in the audio graph. Additionally, different algorithm developers can develop modules supporting the same capabilities but with different configuration parameters. + +Each module tag may contain one or more keys, and these keys may be applied on a per-module basis as a TKV. Multiple TKVs may be applied to a module for different states. +Other module tags may not have key(s) associated. In this case, only the tag is applied to a module. + + +TKV example +~~~~~~~~~~~ + +Consider the Media Format Converter (or MFC) module, which is used to convert the media format of audio streams. +During an audio playback use case, the MFC can convert the media format of the audio clip to a configuration that is supported by the +device endpoint. For example, if an output stream has a 44.1K sample rate, but the backend device is configured to 48K, the MFC will +convert the output stream to 48K to match the output device. This is accomplished through the use of the tag pspd_mfc, which contains keys for sampling rate, bit width, and channels. +The source code that configures this tag can be found in the `configure_mfc `_ +function in the AGM test application agmplay.c. The definition of this tag is listed in the kvh2xml.h file below: + +.. figure:: images/concept/mfc_kvh2xml.png + :figclass: fig-left + :scale: 100 % + + +TKVs can be assigned to modules through the "Key Configurator" tab in AudioReach Creator. To view: + +1. On the top bar, select "View" and then "Key Configurator." A Key Configurator tab will appear on the right + +2. Select a module in the graph view. + +3. Select the Key Configurator Tab on the right. + +.. figure:: images/concept/open_key_configurator.png + :figclass: fig-left + +4. Select the Module Tag tab. 
The currently assigned tags for the module can be seen on the top (if empty, there are no tags assigned to the module): + +.. figure:: images/concept/mfc_assigned_tags.png + :figclass: fig-left + +Each group of module parameters (called parameter ID (PID)) may be controlled by +module tags. In the above example, the pspd_mfc tag is assigned with three TKVs: Sampling Rate, Bit Width, and Channels. +In the Module Tag tab, the user can add a new +tag with an additional set of supported TKVs by selecting "Start Graph Modification", then selecting the plus button in the top right of the Module Tag tab. + +Below shows an example of how to add a new pspd_mfc tag index, with updated sample rate, bit width, and channel values: + +.. figure:: images/concept/add_new_mfc_tag.png + :figclass: fig-left + +Once the tag values are added in the Key Configurator window, the new parameter values can be set for each module tag entry. Close the Key Configurator and +double-click on the module in the Graph View to open the Calibration Window. + +Click Tag Data in the left sidebar to reveal the module tag calibration. Then click the Tag Indices drop-down to set parameters for each tag index. The newly added tag values can be seen here. +Now, tag indices can be selected based on the desired output media format. + +.. figure:: images/concept/mfc_calibration_window.png + :figclass: fig-left + Data Exchange Modes -++++++++++++++++++++ +------------------- -There are different modes which calibration/configuration data can be applied on targeted module(s). Choice of mode is based on whether module has the mode implemented in consideration of size of configuration data, available memory to hold the calibration data, or memory access requirement (Read-Only, Read-Write). Refer to :ref:`spf_cal_config_mode` to learn more about data change modes. Note that GSL does not support shared-persistent calibration at time of writing. 
+There are different modes which calibration/configuration data can be applied on targeted module(s). +Choice of mode is based on whether module has the mode implemented in consideration of size of configuration data, available memory to hold the calibration data, or memory access requirement (Read-Only, Read-Write). +Refer to :ref:`spf_cal_config_mode` to learn more about data change modes. Note that GSL does not support shared-persistent calibration at time of writing. Data Driven through H2XML -------------------------- -H2XML (Header to XML) is a generic tool for generating XML files from annotated C header files. Grammar and syntax of the annotations are similar to Doxygen. H2XML plays big part in enabling data-driven workflow. For example, audio algorithm developers can generate metadata (in form of XML files) of their processing module from module header files using H2XML and import generated XML file into ACDB via ARC tool to incorporate and configure audio processing module in use case graph design. +========================= +H2XML (Header to XML) is a generic tool for generating XML files from annotated C header files. Grammar and syntax of the annotations are similar to Doxygen. +H2XML plays big part in enabling data-driven workflow. For example, audio algorithm developers can generate metadata (in form of XML files) of their processing module from module header files using H2XML and +import generated XML file into ACDB via ARC tool to incorporate and configure audio processing module in use case graph design. diff --git a/docs/_sources/design/linux_plug-in_arch.rst.txt b/docs/_sources/design/linux_plug-in_arch.rst.txt index efe6385..21741b4 100644 --- a/docs/_sources/design/linux_plug-in_arch.rst.txt +++ b/docs/_sources/design/linux_plug-in_arch.rst.txt @@ -224,7 +224,24 @@ Graph Overview Sample Audio Graph for MSSD Scenario Figure depicts the reference design of audio graph for MSSD playback scenario. 
In this example, stream sub-graph and stream-PP sub graph are consolidated into just stream sub-graph. Stream subgraph consists of write shared memory endpoint, PCM decoder, PCM converter. Client passes PCM samples to write shared memory endpoint. PCM converter is put in place to convert PCM samples to format supported by the stream-specific post-processing modules if conversion is necessary. Output of stream subgraph is fed into stream-device subgraph which consists of media format converter(MFC). MFC is put in place to convert stream-leg PCM to device-leg PCM format. After conversion, output of stream-device sub-graph is fed into device PP subgraph for device-specific post-processing. Note that mixer is placed at the beginning of subgraph to mix input streams. Output of device PP subgraph is then feed into device subgraph containing hardware endpoint module such as I2S driver for eventual rendering out of SoC. - + +The reference playback graphs for Linux platforms typically consist of the following subgraphs: + +1. **Stream** – The software interface between the DSP and high-level operating system. +2. **Stream-PP** – Contains postprocessing (PP) modules specific to a stream (for example, bass boost, reverb, etc.) +3. **Stream-Device** – Consists of any per stream per device modules such as sample rate/media format conversion +4. **Device-PP** – Contains PP modules specific to a hardware device (common examples include IIR Filter, MBDRC, etc.) +5. **Device** – The hardware endpoint, most often a mic or a speaker. + +An Rx (audio output) use case will follow this order (Stream -> Device), while a Tx (audio input) use case will be +reversed (Device -> Stream). +By default, GKVs are defined for Stream, StreamPP, Device, and DevicePP. StreamDevice subgraphs do not have a unique GKV, but instead use a combination of Stream +and Device GKVs. + +Please note that it is not necessary for every graph to have a Stream-PP, Stream-Device, or Device-PP subgraph. 
+Most commonly, subgraphs are only defined once for each Stream or Device, and +different calibrations are realized with the PP subgraphs. + Key Vector Design ^^^^^^^^^^^^^^^^^^^^^^ @@ -257,6 +274,28 @@ Key Vector Design | Stream2 + Device Metadata | StreamRX2DeviceRX KVs, DeviceRX PP KVs | +-----------------------------+------------------------------------------+ +Below is a breakdown of a Low Latency playback graph from the RB3 Gen2 ACDB file: + +StreamRX Subgraph: + +.. figure:: images/linux/stream_subgraph.png + :figclass: fig-center + +Stream-Device Subgraph: + +.. figure:: images/linux/stream-device_subgraph.png + :figclass: fig-center + +Device PP Subgraph: + +.. figure:: images/linux/device_pp_subgraph.png + :figclass: fig-center + +Device Subgraph: + +.. figure:: images/linux/device_subgraph.png + :figclass: fig-center + **GKV** GKV1: `__. +2. Add the new tag as a define in kvh2xml.h. Tag values follow the format 0xC00000FF: + +.. figure:: images/system_integrator/define_new_tag.png + :figclass: fig-left + :scale: 60 % + +3. Add one or more keys to associate with the module tag: + +.. figure:: images/system_integrator/tag_key.png + :figclass: fig-left + :scale: 60 % + +4. Update the driver side logic to utilize the new tag. + +5. After recompiling, the output XML file is automatically generated. Import the new +KVH2XML xml file using the Discovery Wizard. For details, see section 4.1 of the ARC guide. + + diff --git a/docs/_sources/index.rst.txt b/docs/_sources/index.rst.txt index 538b8a6..736a075 100644 --- a/docs/_sources/index.rst.txt +++ b/docs/_sources/index.rst.txt @@ -9,10 +9,11 @@ Welcome to AudioReach Announcements ************* +* (10/6/2025): The newly added :ref:`dev_workflow` guide provides a starting point for developers to learn about the AudioReach developer workflow. 
* (8/22/2025): Two documentation pages have been recently released: - * The :ref:`available_modules` list provides an overview of all the available + * The :ref:`available_modules` list provides an overview of all the available audio alogirithms in AudioReach, including where to locate them in the open source project and a basic description of their capabilities. * The :ref:`adding_modules` guide outlines steps on how to add a custom audio algorithm to an AudioReach yocto build. diff --git a/docs/_sources/platform/raspberry_pi4.rst.txt b/docs/_sources/platform/raspberry_pi4.rst.txt index 09c2157..d3a5e49 100644 --- a/docs/_sources/platform/raspberry_pi4.rst.txt +++ b/docs/_sources/platform/raspberry_pi4.rst.txt @@ -51,10 +51,10 @@ Create a Yocto image The first step is to integrate AudioReach components into a Yocto build that can be loaded onto the Raspberry Pi device. This involves syncing a Yocto build and then integrating the meta-audioreach layer, which is currently available as a Github repository. -Before following these steps, it would be helpful to learn the basics of how to use a Yocto project. To do this, please refer to the official Yocto documentation site: https://docs.yoctoproject.org/2.0/yocto-project-qs/yocto-project-qs.html +Before following these steps, it would be helpful to learn the basics of how to use a Yocto project. To do this, please refer to the official Yocto documentation site: https://docs.yoctoproject.org/5.0.12/brief-yoctoprojectqs/index.html Step 1: Create a Yocto build -------------------------------- +----------------------------- Follow the below steps to setup a Yocto build: @@ -74,7 +74,11 @@ Follow the below steps to setup a Yocto build: source ./sources/poky/oe-init-build-env - * Navigate to the file "/build/conf/local.conf". In this file, locate the line **MACHINE ?= ""** and replace it with the line **MACHINE ?= "raspberrypi4"** + * Navigate to the file "/build/conf/local.conf" and add the below line: + + .. 
code-block:: bash + + MACHINE ?= "raspberrypi4" * Navigate to the "build/conf/bblayers.conf" file and add the necessary meta layers by editing the file as shown: @@ -98,22 +102,10 @@ Follow the below steps to setup a Yocto build: /sources/meta-openembedded/meta-python \ " -**Note:** The AudioReach project currently uses the "scarthgap" version of Yocto. Please ensure that your local system has the requirements needed for Yocto scarthgap builds by checking the "Linux Distribution" section on the Yocto documentation page here: https://docs.yoctoproject.org/2.0/yocto-project-qs/yocto-project-qs.html. +**Note:** The AudioReach project currently uses the "scarthgap" version of Yocto. +Please ensure that all utilities required for Yocto scarthgap builds meet the minimum version numbers, which are listed on the Yocto documentation site: https://docs.yoctoproject.org/5.0.12/ref-manual/system-requirements.html#required-git-tar-python-make-and-gcc-versions. -If not, please download the pre-built "buildtools" for Yocto using the below steps: - - .. code-block:: bash - - cd /sources/poky - scripts/install-buildtools - -Then run the following commands to setup your build environment to use buildtools: - - .. code-block:: bash - - cd - source ./sources/poky/oe-init-build-env - source ./sources/poky/buildtools/environment-setup-x86_64-pokysdk-linux +If not, please follow the steps in section 1.5.1 at the above link to install and setup buildtools. Step 2: Get AudioReach Meta Layer @@ -153,7 +145,11 @@ Step 4: Compile the image ------------------------- Now the build setup is complete, and the full Yocto image can be generated. Navigate to the "build" directory -and run the command **bitbake core-image-sato** +and run the below command to generate the image: + + .. code-block:: bash + + bitbake core-image-sato * If the bitbake command gives a "umask" error, run the command **umask 022** and try again. 
* If there is a "restricted license" error, navigate to the "/build/conf/local.conf" file and append the below line: @@ -170,8 +166,8 @@ Step 5: Flash the Yocto image The generated Yocto image can be flashed to an SD card using Raspberry Pi Imager. This can be installed from raspberrypi.com/software, or by running **sudo apt install rpi-imager** on a Linux terminal. Then follow the below steps to flash the device: -* Open Raspberry Pi Imager, and select "RaspberryPi4" as the device type. -* Under the "Choose OS" options, select the "Use custom" option. Make sure to search for all file types. Then navigate to the ".wic" file and select it. +* Open Raspberry Pi Imager, and if there is a "Choose Device" option, select "RaspberryPi4" as the device type. +* Under the "Choose OS" option, select "Use custom". Make sure to search for all file types. Then navigate to the ".wic" file and select it. * Under "Storage", select the desired SD card. * Click "Flash" to start flashing the image. @@ -193,6 +189,8 @@ Configure bootup settings Next, please complete the following steps to enable the audio and update the logging settings. The files mentioned below can be updated directly on the Raspberry Pi 4 UI if the device is plugged into an external monitor, or through a local computer using SCP. + * Note: Users can also connect to the Raspberry Pi through SSH by opening a connection to "root@". By default, there is no password required to connect to SSH. + To enable the sound card: * Navigate to the file "/boot/config.txt" @@ -201,6 +199,14 @@ To enable the sound card: * Make sure to uncomment this line while updating. +Optional step: In the file /boot/config.txt, it is also possible to disable HDMI audio output if the Raspberry Pi will be +connected to a display. This is helpful because if the HDMI sound card is enumerated, it may change the sound card ID of the +Headphones device, which would require the card ID to be updated in ARC. 
+ + * Navigate to the file "/boot/config.txt" + * Locate the line **dtoverlay=vc4-kms-v3d** + * Change this line to **dtoverlay=vc4-kms-v3d,noaudio** + By default, the system logs printed while running a Raspberry Pi usecase will be short. The system log settings should be updated to capture the additional usecase logs that will be printed by AudioReach: * Navigate to the file "/etc/syslog-startup.conf" @@ -214,8 +220,11 @@ By default, the system logs printed while running a Raspberry Pi usecase will be * Save the file. To apply the updated configuration settings, shut down the Raspberry Pi through -the homescreen, or by running the command **shutdown -r -time "now"** through the -terminal. +the homescreen, or by running the below command in the terminal: + +.. code-block:: bash + + shutdown -r -time "now" Enable Real-time Calibration Mode --------------------------------- @@ -223,6 +232,8 @@ Enable Real-time Calibration Mode ARC (AudioReach Creator) is a tool that allows the user to perform several functionalities related to the audio usecase, including creating and editing audio usecase graphs, and editing audio configurations while running an audio usecase in real time. For more information on ARC, please refer to the :ref:`arc_design` page. + * Please note that at this time, AudioReach Creator is only available on Windows. + The below steps will demonstrate how to connect ARC to the Raspberry Pi so that the usecase graph can be viewed in real time. On the Raspberry Pi: @@ -275,9 +286,18 @@ If there are some issues running the usecase, please refer to the suggested fixe Check the sound card -------------------- -On the Raspberry Pi, open the file "/proc/asound/cards". There should be a few -sound card entries in this list. If the file instead says "no sound cards available", you likely -forgot to enable the sound card (see section `Configure bootup settings <#configure-bootup-settings>`__). +On the Raspberry Pi terminal, run the below command: + +.. 
code-block:: bash + + cat /proc/asound/cards + +This should output the available sound cards. If the output instead says "no sound cards available", you likely +forgot to enable the sound cards (see section `Configure bootup settings <#configure-bootup-settings>`__). + + .. figure:: images/rpi_sound_cards.png + :figclass: fig-left + :scale: 100 % Check the sound card ID ----------------------- @@ -285,7 +305,7 @@ Check the sound card ID If the Raspberry Pi is connected to the monitor, the HDMI-based soundcard might get enumerated in the file "/proc/asound/cards", causing the card ID of the Headphones to change. To fix this, you will need to have ARC installed on a secondary computer (see section `Enable Real-time Calibration Mode <#enable-real-time-calibration-mode>`__). - #. Copy the ACDB files from the Raspberry Pi to your local computer. These files + #. Copy the ACDB and workspace files from the Raspberry Pi to your local computer. These files can be found under the folder "/etc/acdbdata". * Note: This can be done by using "scp" commands on a Linux terminal or by using a program such as "WinScp". @@ -295,19 +315,19 @@ card ID of the Headphones to change. To fix this, you will need to have ARC inst copied from the Raspberry Pi. #. On the top left drop down menu displaying the usecases, - select any usecase that uses "Headphones". + select any usecase that uses "Speaker". - #. Double click the "ALSA device sink" module shown below + #. Double click the "ALSA Device Sink" module shown below - .. figure:: images/headphone_screenshot.png + .. figure:: images/alsa_sink_module.png :figclass: fig-left - :scale: 80 % + :scale: 100 % #. This will open the Configure Window. Check the "card_id" field here. The card_id should be the same as the ID that corresponds with the Headphones entry in the "/proc/asound/cards" file on the Raspberry Pi. - .. figure:: images/alsa_sink_module.png + .. 
figure:: images/alsa_configure_window.png :figclass: fig-left :scale: 100 % diff --git a/docs/api/args_arosal.html b/docs/api/args_arosal.html index 90a7824..62f9b70 100644 --- a/docs/api/args_arosal.html +++ b/docs/api/args_arosal.html @@ -1661,6 +1661,17 @@

ar_osal_sys_id

Last sub system ID

+
+
+AR_SUB_SYS_IDS_MASK
+

Bit masks representing the subsystem IDs. Update when subsystem gets added or removed.

+
+ +
+
+AR_DEFAULT_DSP
+
+
@@ -2100,9 +2111,20 @@

ar_osal_shmem
typedef enum ar_shmem_buffer_index_type ar_shmem_buffer_index_type_t
-

enum for shmem offset/address buffer index type Bits to indicate if hardware accelerator is enabled/disabled

+

enum for shmem offset/address buffer index type

+
+ +
+
+typedef enum ar_shmem_pd_type ar_shmem_pd_type_t
+

Bits to indicate if hardware accelerator is enabled/disabled

+
+
+typedef struct ar_shmem_proc_info_t ar_shmem_proc_info
+
+
typedef struct ar_shmem_info_t ar_shmem_info
@@ -2193,6 +2215,22 @@

ar_osal_shmem

+
+
+enum ar_shmem_pd_type
+

Values:

+
+
+enumerator STATIC_PD
+
+ +
+
+enumerator DYNAMIC_PD
+
+ +
+
enum ar_shmem_hyp_assign_dest_sys_perm_t
@@ -2302,7 +2340,7 @@

ar_osal_shmem

Helps unmap the shared memory allocated externally with SMMU.

Parameters:
-

info[in] pointer to ar_shmem_info. required input parameters in ar_shmem_info ar_shmem_info_t.cache_type ar_shmem_info_t.buf_size ar_shmem_info_t.mem_type ar_shmem_info_t.pa_lsw ar_shmem_info_t.pa_msw ar_shmem_info_t.num_sys_id ar_shmem_info_t.sys_id

+

info[in] pointer to ar_shmem_info. required input parameters in ar_shmem_info ar_shmem_info_t.cache_type ar_shmem_info_t.buf_size ar_shmem_info_t.mem_type ar_shmem_info_t.pa_lsw ar_shmem_info_t.pa_msw ar_shmem_info_t.num_sys_id ar_shmem_info_t.sys_id

Returns:

0 — Success Nonzero — Failure

@@ -2353,6 +2391,31 @@

ar_osal_shmem

+
+
+struct ar_shmem_proc_info_t
+
+#include <ar_osal_shmem.h>
+
+

Public Members

+
+
+uint8_t proc_id
+
+ +
+
+ar_shmem_pd_type_t proc_type
+
+ +
+
+bool_t is_active
+
+ +
+
+
struct ar_shmem_info_t
@@ -2429,7 +2492,7 @@

ar_osal_shmem
-uint8_t *sys_id
+ar_shmem_proc_info *sys_id

in, pointer to array of size num_sys_id for sub-system Ids provided in ar_osal_sys_id.h, used to allocate shared memory between the given list of sys_id provided with ar_shmem_alloc()/ar_shmem_map() call.

diff --git a/docs/api/index.html b/docs/api/index.html index 0ed5d2b..826812a 100644 --- a/docs/api/index.html +++ b/docs/api/index.html @@ -24,7 +24,7 @@ - + @@ -98,7 +98,7 @@