Update to latest 'jb' from Wolfson Micro
This version has an audio input implementation. We still need a
configuration file appropriate for our boards.
diff --git a/Android.mk b/Android.mk
index a33b9ae..fb97c6b 100644
--- a/Android.mk
+++ b/Android.mk
@@ -1,22 +1,84 @@
-ifeq ($(strip $(BOARD_USES_TINY_AUDIO_HW)),true)
+#
+# Copyright (C) 2012 Wolfson Microelectronics plc
+# Copyright (C) 2011 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
LOCAL_PATH := $(call my-dir)
include $(CLEAR_VARS)
-# Should change this so the enable variable gets used as the name?
-LOCAL_MODULE := audio.primary.herring
-LOCAL_MODULE_PATH := $(TARGET_OUT_SHARED_LIBRARIES)/hw
-LOCAL_SRC_FILES := audio_hw.c
-LOCAL_C_INCLUDES += \
- external/tinyalsa/include \
- external/expat/lib \
- system/media/audio_utils/include \
- system/media/audio_effects/include
-LOCAL_SHARED_LIBRARIES := liblog libcutils libtinyalsa libaudioutils \
- libdl libexpat
+LOCAL_MODULE := libaudiohalcm
+LOCAL_MODULE_PATH := $(TARGET_OUT_SHARED_LIBRARIES)
LOCAL_MODULE_TAGS := optional
+LOCAL_CFLAGS += -Werror
+
+LOCAL_C_INCLUDES += \
+ external/tinycompress/include \
+ external/tinyalsa/include \
+ external/tinyhal/audio \
+ external/expat/lib \
+ $(call include-path-for, audio-utils)
+
+LOCAL_SRC_FILES := \
+ audio_config.c
+
+LOCAL_SHARED_LIBRARIES := \
+ libcutils \
+ libutils \
+ libdl \
+ liblog \
+ libexpat \
+ libtinyalsa \
+
+include $(BUILD_SHARED_LIBRARY)
+
+ifeq ($(strip $(BOARD_USES_TINYHAL_AUDIO)),true)
+include $(CLEAR_VARS)
+
+LOCAL_MODULE := audio.primary.$(TARGET_DEVICE)
+LOCAL_MODULE_PATH := $(TARGET_OUT_SHARED_LIBRARIES)/hw
+LOCAL_MODULE_TAGS := optional
+
+LOCAL_CFLAGS += -Werror
+
+LOCAL_C_INCLUDES += \
+ external/tinycompress/include \
+ external/tinyalsa/include \
+ external/tinyhal/audio \
+ external/expat/lib \
+ $(call include-path-for, audio-utils)
+
+LOCAL_SRC_FILES := \
+ audio_hw.c \
+ voice_trigger.cpp
+
+LOCAL_STATIC_LIBRARIES := \
+ libmedia_helper
+
+LOCAL_SHARED_LIBRARIES := \
+ libcutils \
+ libutils \
+ libdl \
+ liblog \
+ libhardware_legacy \
+ libtinyalsa \
+ libtinycompress \
+ libaudiohalcm \
+ libaudioutils \
+ libsysutils
+
include $(BUILD_SHARED_LIBRARY)
endif
diff --git a/audio.example.xml b/audio.example.xml
new file mode 100644
index 0000000..4b74272
--- /dev/null
+++ b/audio.example.xml
@@ -0,0 +1,264 @@
+<!-- This file is provided as a reference for writing config files
+It does NOT represent any particular device and will NOT work on
+real hardware. You must create a file with the correct settings for
+your hardware, and the comments here will explain the layout of this
+file and expected content.
+
+On the target device this file must be located in /system/etc and named
+audio.<device>.xml, where <device> is the string returned by the system
+property ro.product.device
+-->
+
+<audiohal>
+ <!-- mixer element _must_ be first. The 'card' attribute is optional
+ and sets the ALSA card number of the mixer device - if not given it
+ defaults to 0 -->
+
+ <mixer card="0">
+
+ <!-- init element lists control settings required to initialize the
+ hardware and driver. These settings are applied only once when the
+ library is first loaded during boot -->
+
+ <init>
+
+ <!-- A sequence of ctl elements. Each entry sets a mixer
+ control to a given value. The entries are applied in the
+ order they are listed here.
+ Each entry _must_ have these attributes
+ name - name of the ALSA mixer control
+ val - value to set it to
+
+ It can also have an index attribute giving the numeric index
+ of the control value to set. This is used where a control
+ has multiple value slots (for example a volume control
+          with two values, one for left channel and one for right). If
+ an index attribute is not given the content of the val attribute
+ will be applied to all value slots of the control.
+
+ The numbers in val and index attributes can be given in either
+ decimal, or hex (hex is prefixed with 0x). For a control with
+ enumerated values the val attribute must be a string
+
+ BYTE ARRAYS: for controls that are a byte array val must be
+ a string of comma-separated byte values. This can be shorter
+ than the total size of the control, combined with the
+ optional index attribute this allows any subset of the byte
+ array to be changed.
+ -->
+
+ <ctl name="DAC1 Switch" val="1" />
+ <ctl name="Speaker Enable" val="0"/>
+ <ctl name="Jack Enable" val="0"/>
+ <ctl name="Codec Config" index="8" val="0x7f,0x54,0xaa,0xaa"/>
+
+ </init>
+ </mixer>
+
+<!-- Next you must list all the devices supported by the hardware. The
+name attribute of the <device> element identifies the device. These names are
+recognized:
+ "global" dummy global device - see explanation below
+ "speaker" AUDIO_DEVICE_OUT_SPEAKER
+ "earpiece" AUDIO_DEVICE_OUT_EARPIECE
+ "headset" AUDIO_DEVICE_OUT_WIRED_HEADSET | AUDIO_DEVICE_IN_WIRED_HEADSET
+ "headphone" AUDIO_DEVICE_OUT_WIRED_HEADPHONE
+ "sco" AUDIO_DEVICE_OUT_ALL_SCO | AUDIO_DEVICE_IN_ALL_SCO
+ "a2dp" AUDIO_DEVICE_OUT_ALL_A2DP
+ "usb" AUDIO_DEVICE_OUT_ALL_USB
+ "mic" AUDIO_DEVICE_IN_BUILTIN_MIC
+ "back mic" AUDIO_DEVICE_IN_BACK_MIC
+ "voice" AUDIO_DEVICE_IN_VOICE_CALL
+ "aux" AUDIO_DEVICE_IN_AUX_DIGITAL
+
+Within the <device> element you can declare a number of "paths", each path
+defines a group of control settings to be applied. Each path is identified by
+a name. The "on" and "off" paths are special and list a global enable and
+disable setting for the device. Use of devices is reference-counted: when
+routing of a stream is changed to use a device that is currently disabled its
+"on" path will be applied. When no streams are using a device its "off"
+path will be applied.
+
+Other paths are user-defined and you can give them any name you choose.
+They are used to apply custom paths when required by a stream and will
+be used only when a stream is connected to or disconnected from a
+device if the <stream> element has an <enable> or <disable> element requesting
+that path.
+
+It is not mandatory to provide paths. You only need to define paths
+if there are specific control settings that must be applied. So for example
+if no controls need be applied to enable or disable a device then you
+do not need to define the "on" and "off" paths.
+
+The <ctl> elements within each path have the same format and behaviour
+as described under <mixer><init>.
+
+The "global" device is a special device that represents the audio system as a
+whole and is used to invoke mixer settings that are independent of any real
+device and which apply globally to the audio system. A stream is automatically
+connected to "global" when it is opened and disconnected when it is closed.
+The behaviour is identical to the way paths are invoked in any other <device>
+entry so the effect is
+ - the "on" path will be applied when a stream is opened and there are
+ no other streams already open. As an example this could be used to
+ bring the audio hardware out of a standby state
+
+ - the "off" path will be applied when the last open stream is closed.
+ As an example this could be used to put the audio hardware into a
+ standby state
+
+ - the custom paths will be applied when the stream that requests them
+ is opened or closed.
+-->
+
+ <device name="speaker">
+ <path name="on">
+        <!-- List of ctl elements for control values to apply
+ when this device is enabled -->
+ <ctl name="Speaker Enable" val="1"/>
+ </path>
+
+ <path name="off">
+        <!-- List of ctl elements for control values to apply
+ when this device is disabled -->
+ <ctl name="Speaker Enable" val="0"/>
+ </path>
+
+ <!-- Following paths are user-defined and are applied when a
+          <stream> element's routing is changed to add or remove this
+ device. If the path name matches the name given in the <stream>
+ element it will be applied. A stream could be routed to multiple
+ inputs or outputs - the paths for connecting and disconnecting
+ a stream to a device must therefore have the same name in each
+ <device>.
+
+ It is not mandatory to declare custom paths - depending on your
+ hardware there may not be any specific action required to route
+ a stream to a particular device. Also you do not have to define
+ the path in every device, only the devices where some action must
+ be taken to connect or disconnect a stream.
+
+ For this example four custom paths are defined:
+ pcm_out_en = control setting to connect PCM output to this device
+ pcm_out_dis = control setting to disconnect PCM output from this device
+ -->
+
+ <path name="pcm_out_en">
+ <ctl name="PCM Speaker switch" val="1"/>
+ </path>
+ <path name="pcm_out_dis">
+ <ctl name="PCM Speaker switch" val="0"/>
+ </path>
+ </device>
+
+ <device name="headphone">
+ <path name="on">
+ <ctl name="Jack Enable" val="1"/>
+ </path>
+ <path name="off">
+ <ctl name="Jack Enable" val="0"/>
+ </path>
+ <path name="pcm_out_en">
+ <ctl name="PCM Jack switch" val="1"/>
+ </path>
+ <path name="pcm_out_dis">
+ <ctl name="PCM Jack switch" val="0"/>
+ </path>
+ </device>
+
+<!-- Following the device definitions there must be a <stream> entry
+for every output and input stream supported by the hardware.
+There are two types of stream that can be declared here:
+- anonymous streams: these will be used to supply playback and record
+ streams for AudioFlinger
+
+- named streams, which are mainly used to declare custom streams to handle
+ special routing use-cases that are external to the normal Android audio
+ path - typically where the audio is routed entirely in hardware without
+ being passed through Android, for example the baseband audio link or
+ FM radio.
+
+For standard anonymous streams there are two that would normally be on
+any device: PCM input and PCM output. It is also possible to declare a stream
+as "compress" - this is intended for cases where a playback stream is
+decompressed in hardware, or a record stream provides raw compressed data that
+must be decompressed in software.
+
+Named streams can be declared as type "hw", to represent a hardware-hardware
+link where specifying a data type and direction would be meaningless.
+
+Mandatory attributes for PCM and compressed streams:
+ type must be "pcm" or "compress"
+ dir direction of stream, either "in" (recording) or "out" (playback)
+
+Mandatory for named streams:
+ type must be "pcm", "compress" or "hw"
+ name a custom name for a named stream. The name you choose here must
+ match the name your HAL will use to request this stream
+
+Mandatory for hw streams:
+ type must be "hw"
+ name a custom name for the stream (hw streams must be named streams)
+
+Optional attributes:
+ card ALSA card number. If not given this defaults to 0
+ device ALSA device number. If not given this defaults to 0
+ instances limits the maximum number of instances of this stream, if not
+ specified the number of instances is unlimited
+ name a custom name for a named stream. The name you choose here must
+ match the name your HAL will use to request this stream
+
+Anonymous PCM streams should not normally have an instance limit.
+-->
+
+ <stream type="pcm" dir="out" card="0" device="0">
+ <!-- The <enable> and <disable> tags give the name of a path
+ to apply for each connected device when the stream is either connected
+ to (enable) or disconnected from (disable) that device.
+ The way this works is that when stream routing changes, the HAL will
+ look through the paths of each device this stream is connected to,
+ - for each device the stream is being disconnected from, if it
+ contains a path matching the path name in <disable>, that path
+ will be applied.
+ - for each device the stream is being connected to, if it
+ contains a path matching the path name in <enable>, that path
+ will be applied.
+ -->
+ <enable path="pcm_out_en"/>
+ <disable path="pcm_out_dis"/>
+
+ <!-- The optional usecase block allows you to define custom use-cases that
+ are triggered by set_parameter() calls to the HAL. The set_parameter()
+ is a string of the form <setting>=<value>. The HAL will search for a
+ usecase block whose name attribute matches <setting> and within that
+ a case block whose name attribute matches <value>. If a matching case
+ block is found the enclosed <ctl> blocks will be applied.
+ The example below defines a use case for switching a codec algorithm
+ between wideband and narrowband response. The two cases will be
+ triggered by a set_parameter() of "bandwidth=narrow" or "bandwidth=wide".
+ -->
+ <usecase name="bandwidth">
+ <case name="narrow">
+ <ctl name="Codec Wideband" val="0" />
+ </case>
+ <case name="wide">
+ <ctl name="Codec Wideband" val="1" />
+ </case>
+ </usecase>
+
+ </stream>
+
+ <stream type="pcm" dir="in" card="0" device="0">
+ </stream>
+
+    <!-- Example named stream, in this case for an FM radio path. This will not
+ be available for standard AudioFlinger playback and record paths. It must
+ be explicitly requested by the audio HAL when FM radio is enabled
+ -->
+ <stream name="FM radio" type="pcm" dir="in" card="0" device="0">
+ <!-- control paths to be enabled when this stream is enabled or disabled -->
+ <enable path="fm_radio_en"/>
+ <disable path="fm_radio_dis"/>
+ </stream>
+
+</audiohal>
diff --git a/audio_config.c b/audio_config.c
new file mode 100644
index 0000000..34bccd3
--- /dev/null
+++ b/audio_config.c
@@ -0,0 +1,2154 @@
+/*
+ * Copyright (C) 2012-2013 Wolfson Microelectronics plc
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "tiny_hal_config"
+/*#define LOG_NDEBUG 0*/
+/*#undef NDEBUG*/
+
+#include <stddef.h>
+#include <errno.h>
+#include <assert.h>
+#include <cutils/log.h>
+#include <cutils/properties.h>
+#include <cutils/compiler.h>
+
+#include <system/audio.h>
+
+/* Workaround for linker error if audio_effect.h is included in multiple
+ * source files. Prevent audio.h including it */
+#define ANDROID_AUDIO_EFFECT_H
+struct effect_interface_s;
+typedef struct effect_interface_s **effect_handle_t;
+#include <hardware/audio.h>
+
+#include <tinyalsa/asoundlib.h>
+#include <expat.h>
+
+#include "audio_config.h"
+
+#define MIXER_CARD_DEFAULT 0
+#define PCM_CARD_DEFAULT 0
+#define PCM_DEVICE_DEFAULT 0
+#define COMPRESS_CARD_DEFAULT 0
+#define COMPRESS_DEVICE_DEFAULT 0
+
+/* The dynamic arrays are extended in multiples of this number of objects */
+#define DYN_ARRAY_GRANULE 16
+
+/* Largest byte array control we handle */
+#define BYTE_ARRAY_MAX_SIZE 512
+
+#define INVALID_CTL_INDEX 0xFFFFFFFFUL
+
+struct config_mgr;
+struct stream;
+struct path;
+struct device;
+struct usecase;
+struct scase;
+
+/* Dynamically extended array of fixed-size objects */
+struct dyn_array {
+ uint count;
+ uint16_t max_count; /* current maximum size of allocated array */
+ uint16_t elem_size; /* size of array elements */
+ union {
+ void *data;
+ struct device *devices;
+ struct stream *streams;
+ struct path *paths;
+ struct usecase *usecases;
+ struct scase *cases;
+ struct ctl *ctls;
+ const char **path_names;
+ };
+};
+
+/* Paths for "on" and "off" are a special case and have fixed ids */
+enum {
+ e_path_id_off = 0,
+ e_path_id_on = 1,
+ e_path_id_custom_base = 2
+};
+
+struct ctl {
+ struct mixer_ctl *ctl;
+ uint32_t index;
+ uint32_t array_count;
+ union {
+ uint32_t uinteger;
+ const char *name;
+ const uint8_t *data;
+ } value;
+};
+
+struct path {
+ int id; /* Integer identifier of this path */
+ struct dyn_array ctl_array;
+};
+
+struct device {
+ uint32_t type; /* 0 is reserved for the global device */
+ int use_count; /* counts total streams using this device */
+ struct dyn_array path_array;
+};
+
+struct scase {
+ const char *name;
+ struct dyn_array ctl_array;
+};
+
+struct usecase {
+ const char *name;
+ struct dyn_array case_array;
+};
+
+struct stream_control {
+ struct mixer_ctl *ctl;
+ uint id;
+ uint min;
+ uint max;
+};
+
+struct volume_control {
+ struct stream_control control;
+ uint min;
+ uint max;
+};
+
+struct stream {
+ struct hw_stream info; /* must be first member */
+
+ struct config_mgr* cm;
+ const char* name;
+
+ int ref_count;
+ int max_ref_count;
+
+ int enable_path; /* id of paths to invoke when enabled */
+ int disable_path; /* id of paths to invoke when disabled */
+
+ uint32_t current_devices; /* devices currently active for this stream */
+
+ struct {
+ struct stream_control volume_left;
+ struct stream_control volume_right;
+ } controls;
+
+ struct dyn_array usecase_array;
+};
+
+struct config_mgr {
+ pthread_mutex_t lock;
+
+ struct mixer *mixer;
+
+ uint32_t supported_output_devices;
+ uint32_t supported_input_devices;
+
+ struct dyn_array device_array;
+ struct dyn_array stream_array;
+};
+
+/*********************************************************************
+ * Structures and enums for XML parser
+ *********************************************************************/
+
+#define MAX_PARSE_DEPTH 6
+
+/* For faster parsing put more commonly-used elements first */
+enum element_index {
+ e_elem_ctl = 0,
+ e_elem_path,
+ e_elem_device,
+ e_elem_stream,
+ e_elem_enable,
+ e_elem_disable,
+ e_elem_case,
+ e_elem_usecase,
+ e_elem_stream_ctl,
+ e_elem_init,
+ e_elem_mixer,
+ e_elem_audiohal,
+
+ e_elem_count
+};
+
+/* For faster parsing put more commonly-used attribs first */
+enum attrib_index {
+ e_attrib_name = 0,
+ e_attrib_val,
+ e_attrib_path,
+ e_attrib_function,
+ e_attrib_type,
+ e_attrib_index,
+ e_attrib_dir,
+ e_attrib_card,
+ e_attrib_device,
+ e_attrib_instances,
+ e_attrib_rate,
+ e_attrib_period_size,
+ e_attrib_period_count,
+ e_attrib_min,
+ e_attrib_max,
+
+ e_attrib_count
+};
+
+#define BIT(x) (1<<(x))
+
+struct parse_state;
+typedef int(*elem_fn)(struct parse_state *state);
+
+struct parse_element {
+ const char *name;
+ uint16_t valid_attribs; /* bitflags of valid attribs for this element */
+ uint16_t required_attribs; /* bitflags of attribs that must be present */
+ uint16_t valid_subelem; /* bitflags of valid sub-elements */
+ elem_fn start_fn;
+ elem_fn end_fn;
+};
+
+struct parse_attrib {
+ const char *name;
+};
+
+struct parse_device {
+ const char *name;
+ uint32_t device;
+};
+
+struct parse_stack_entry {
+ uint16_t elem_index;
+ uint16_t valid_subelem;
+};
+
+/* Temporary state info for config file parser */
+struct parse_state {
+ struct config_mgr *cm;
+ FILE *file;
+ XML_Parser parser;
+ char read_buf[256];
+ int parse_error; /* value >0 aborts without error */
+ int error_line;
+ int mixer_card_number;
+
+ struct {
+ const char *value[e_attrib_count];
+ const XML_Char **all;
+ } attribs;
+
+ /* Current parent object. We don't need a stack because
+ * an object cannot be nested below an object of the same type
+ * so there can only ever be zero or one object of a given type
+ * active at any time
+ */
+ struct {
+ struct device *device;
+ struct stream *stream;
+ struct path *path;
+ struct usecase *usecase;
+ struct scase *scase;
+ } current;
+
+ /* This array hold a de-duplicated list of all path names encountered */
+ struct dyn_array path_name_array;
+
+ /* This is a temporary path object used to collect the initial
+ * mixer setup control settings under <mixer><init>
+ */
+ struct path init_path;
+
+ struct {
+ int index;
+ struct parse_stack_entry entry[MAX_PARSE_DEPTH];
+ } stack;
+};
+
+
+static const char *debug_device_to_name(uint32_t device);
+
+/*********************************************************************
+ * Routing control
+ *********************************************************************/
+
+static void apply_ctls_l( struct ctl *pctl, const int ctl_count )
+{
+ int i;
+ unsigned int vnum;
+ unsigned int value_count;
+ int err = 0;
+ uint8_t ctl_data[BYTE_ARRAY_MAX_SIZE];
+
+ ALOGV("+apply_ctls_l");
+
+ for (i = 0; i < ctl_count; ++i, ++pctl) {
+ switch (mixer_ctl_get_type(pctl->ctl)) {
+ case MIXER_CTL_TYPE_BOOL:
+ case MIXER_CTL_TYPE_INT:
+ value_count = mixer_ctl_get_num_values(pctl->ctl);
+
+ ALOGV("apply ctl '%s' = 0x%x (%d values)",
+ mixer_ctl_get_name(pctl->ctl),
+ pctl->value.uinteger,
+ value_count);
+
+ if (pctl->index == INVALID_CTL_INDEX) {
+ for (vnum = 0; vnum < value_count; ++vnum) {
+ err = mixer_ctl_set_value(pctl->ctl, vnum, pctl->value.uinteger);
+ if (err < 0) {
+ break;
+ }
+ }
+ } else {
+ err = mixer_ctl_set_value(pctl->ctl, pctl->index, pctl->value.uinteger);
+ }
+ ALOGE_IF(err < 0, "Failed to set ctl '%s' to %u",
+ mixer_ctl_get_name(pctl->ctl),
+ pctl->value.uinteger);
+ break;
+
+ case MIXER_CTL_TYPE_BYTE:
+ /* byte array */
+ vnum = mixer_ctl_get_num_values(pctl->ctl);
+
+ ALOGV("apply ctl '%s' = byte data (%d bytes)",
+ mixer_ctl_get_name(pctl->ctl),
+ vnum);
+
+ if ((pctl->index == 0) && (pctl->array_count == vnum)) {
+ err = mixer_ctl_set_array(pctl->ctl, pctl->value.data, pctl->array_count);
+ } else {
+ /* read-modify-write */
+ err = mixer_ctl_get_array(pctl->ctl, ctl_data, vnum);
+ if (err >= 0) {
+ memcpy(&ctl_data[pctl->index], pctl->value.data, pctl->array_count);
+ err = mixer_ctl_set_array(pctl->ctl, ctl_data, vnum);
+ }
+ }
+
+ ALOGE_IF(err < 0, "Failed to set ctl '%s'",
+ mixer_ctl_get_name(pctl->ctl));
+ break;
+
+ case MIXER_CTL_TYPE_ENUM:
+ ALOGV("apply ctl '%s' to '%s'",
+ mixer_ctl_get_name(pctl->ctl),
+ pctl->value.name);
+
+ err = mixer_ctl_set_enum_by_string(pctl->ctl, pctl->value.name);
+
+ ALOGE_IF(err < 0, "Failed to set ctl '%s' to '%s'",
+ mixer_ctl_get_name(pctl->ctl),
+ pctl->value.name);
+ break;
+
+ default:
+ break;
+ }
+ }
+
+ ALOGV("-apply_ctls_l");
+}
+
+static void apply_path_l(struct path *path)
+{
+ ALOGV("+apply_path_l(%p) id=%u", path, path->id);
+
+ apply_ctls_l(path->ctl_array.ctls, path->ctl_array.count);
+
+ ALOGV("-apply_path_l(%p)", path);
+}
+
+static void apply_device_path_l(struct device *pdev, struct path *path)
+{
+ ALOGV("+apply_device_path_l(%p) id=%u", path, path->id);
+
+ /* The on and off paths for a device are reference-counted */
+ switch (path->id) {
+ case e_path_id_off:
+ if (--pdev->use_count > 0) {
+ ALOGV("Device still in use - not applying 'off' path");
+ return;
+ }
+ break;
+
+ case e_path_id_on:
+ if (++pdev->use_count > 1) {
+ ALOGV("Device already enabled - not applying 'on' path");
+ return;
+ }
+ break;
+
+ default:
+ break;
+ }
+
+ apply_path_l(path);
+
+ ALOGV("-apply_device_path_l(%p)", path);
+}
+
+static void apply_paths_by_id_l(struct device *pdev, int first_id,
+ int second_id)
+{
+ struct path *ppath = pdev->path_array.paths;
+ struct path *found_paths[2] = {0};
+ int path_count = pdev->path_array.count;
+
+ ALOGV("Applying paths [first=%u second=%u] to device(@%p, mask=0x%x '%s')",
+ first_id, second_id, ppath, pdev->type, debug_device_to_name(pdev->type));
+
+ /* To save time we find both paths in a single walk of the list */
+ for (; path_count > 0; --path_count, ++ppath) {
+ if (ppath->id == first_id) {
+ found_paths[0] = ppath;
+ if ((found_paths[1] != NULL) || (first_id == second_id)) {
+ /* We have both paths or there is only one path to find */
+ break;
+ }
+ } else if (ppath->id == second_id) {
+ found_paths[1] = ppath;
+ if (found_paths[0] != NULL) {
+ break;
+ }
+ }
+ }
+
+ if (found_paths[0] != NULL) {
+ apply_device_path_l(pdev, found_paths[0]);
+ }
+
+ if (found_paths[1] != NULL) {
+ apply_device_path_l(pdev, found_paths[1]);
+ }
+}
+
+static void apply_paths_to_devices_l(struct config_mgr *cm, uint32_t devices,
+ int first_id, int second_id)
+{
+ struct device *pdev = cm->device_array.devices;
+ int dev_count = cm->device_array.count;
+ const uint32_t input_flag = devices & AUDIO_DEVICE_BIT_IN;
+
+ /* invoke path path_id on all struct device matching devices */
+ ALOGV("Apply paths [first=%u second=%u] to devices in 0x%x",
+ first_id, second_id, devices);
+
+ devices &= ~AUDIO_DEVICE_BIT_IN;
+
+ while ((dev_count > 0) && (devices != 0)) {
+ if (((pdev->type & input_flag) == input_flag)
+ && ((pdev->type & devices) != 0)) {
+ devices &= ~pdev->type;
+ apply_paths_by_id_l(pdev, first_id, second_id);
+ }
+
+ --dev_count;
+ ++pdev;
+ }
+}
+
+static void apply_paths_to_global_l(struct config_mgr *cm,
+ int first_id, int second_id)
+{
+ struct device *pdev = cm->device_array.devices;
+ struct device * const pend = pdev + cm->device_array.count;
+
+ ALOGV("Apply global paths [first=%u second=%u]", first_id, second_id);
+
+ while (pdev < pend) {
+ if (pdev->type == 0) {
+ apply_paths_by_id_l(pdev, first_id, second_id);
+ break;
+ }
+ ++pdev;
+ }
+}
+
+uint32_t get_current_routes( const struct hw_stream *stream )
+{
+ struct stream *s = (struct stream *)stream;
+ ALOGV("get_current_routes(%p) 0x%x", stream, s->current_devices);
+ return s->current_devices;
+}
+
+void apply_route( const struct hw_stream *stream, uint32_t devices )
+{
+ struct stream *s = (struct stream *)stream;
+ struct config_mgr *cm = s->cm;
+
+ /* Only apply routes to devices that have changed state on this stream */
+ uint32_t enabling = devices & ~s->current_devices;
+ uint32_t disabling = ~devices & s->current_devices;
+
+ ALOGV("apply_route(%p) devices=0x%x", stream, devices);
+
+ if (stream_is_input(stream)) {
+ devices &= AUDIO_DEVICE_IN_ALL;
+ devices |= AUDIO_DEVICE_BIT_IN;
+ } else {
+ devices &= AUDIO_DEVICE_OUT_ALL;
+ }
+
+ pthread_mutex_lock(&cm->lock);
+
+ apply_paths_to_devices_l(cm, disabling, s->disable_path, e_path_id_off);
+ apply_paths_to_devices_l(cm, enabling, e_path_id_on, s->enable_path);
+
+ /* Save new set of devices for this stream */
+ s->current_devices = devices;
+
+ pthread_mutex_unlock(&cm->lock);
+}
+
+uint32_t get_routed_devices( const struct hw_stream *stream )
+{
+ struct stream *s = (struct stream *)stream;
+ return s->current_devices;
+}
+
+void rotate_routes( struct config_mgr *cm, int orientation )
+{
+ /* Route rotation not currently supported */
+}
+
+/*********************************************************************
+ * Stream control
+ *********************************************************************/
+
+static int set_vol_ctl( const struct stream_control *volctl, uint percent)
+{
+ uint val;
+ uint range;
+
+ switch (percent) {
+ case 0:
+ val = volctl->min;
+ break;
+
+ case 100:
+ val = volctl->max;
+ break;
+
+ default:
+ range = volctl->max - volctl->min;
+ val = volctl->min + ((percent * range)/100);
+ break;
+ }
+
+ ALOGW("set '%s' = %u", mixer_ctl_get_name(volctl->ctl), val);
+ mixer_ctl_set_value(volctl->ctl, volctl->id, val);
+ return 0;
+}
+
+int set_hw_volume( const struct hw_stream *stream, int left_pc, int right_pc)
+{
+ struct stream *s = (struct stream *)stream;
+ int ret = -ENOSYS;
+
+ if (s->controls.volume_left.ctl) {
+ if (!s->controls.volume_right.ctl) {
+ /* Control is mono so average left and right */
+ left_pc = (left_pc + right_pc) / 2;
+ }
+
+ ret = set_vol_ctl(&s->controls.volume_left, left_pc);
+ }
+
+ if (s->controls.volume_right.ctl) {
+ ret = set_vol_ctl(&s->controls.volume_right, right_pc);
+ }
+
+ ALOGV_IF(ret == 0, "set_hw_volume: L=%d%% R=%d%%", left_pc, right_pc);
+
+ return ret;
+}
+
+static struct stream *find_named_stream(struct config_mgr *cm,
+ const char *name)
+{
+ struct stream *s = cm->stream_array.streams;
+ int i;
+
+ for (i = cm->stream_array.count - 1; i >= 0; --i) {
+ if (s->name) {
+ if (strcmp(s->name, name) == 0) {
+ return s;
+ }
+ }
+ s++;
+ }
+ return NULL;
+}
+
+static bool open_stream_l(struct config_mgr *cm, struct stream *s)
+{
+ if (s->ref_count < s->max_ref_count) {
+ ++s->ref_count;
+ if (s->ref_count == 1) {
+ apply_paths_to_global_l(cm, e_path_id_on, s->enable_path);
+ }
+ return true;
+ } else {
+ ALOGV("stream at maximum refcount %d", s->ref_count);
+ return false;
+ }
+}
+
+const struct hw_stream *get_stream(struct config_mgr *cm,
+ const audio_devices_t devices,
+ const audio_output_flags_t flags,
+ const struct audio_config *config )
+{
+ int i;
+ struct stream *s = cm->stream_array.streams;
+ const bool pcm = audio_is_linear_pcm(config->format);
+ enum stream_type type;
+
+ ALOGV("+get_stream devices=0x%x flags=0x%x format=0x%x",
+ devices, flags, config->format );
+
+ if (devices & AUDIO_DEVICE_BIT_IN) {
+ type = pcm ? e_stream_in_pcm : e_stream_in_compress;
+ } else {
+ type = pcm ? e_stream_out_pcm : e_stream_out_compress;
+ }
+
+ pthread_mutex_lock(&cm->lock);
+ for (i = cm->stream_array.count - 1; i >= 0; --i) {
+ ALOGV("get_stream: require type=%d; try type=%d refcount=%d refmax=%d",
+ type, s[i].info.type, s[i].ref_count, s[i].max_ref_count );
+ if (s[i].info.type == type) {
+ if (open_stream_l(cm, &s[i])) {
+ break;
+ }
+ }
+ }
+ pthread_mutex_unlock(&cm->lock);
+
+ if (i >= 0) {
+ ALOGV("-get_stream =%p (refcount=%d)", &s[i].info,
+ s[i].ref_count );
+ return &s[i].info;
+ } else {
+ ALOGE("-get_stream no suitable stream" );
+ return NULL;
+ }
+}
+
+const struct hw_stream *get_named_stream(struct config_mgr *cm,
+ const char *name)
+{
+ int i;
+ struct stream *s;
+
+ ALOGV("+get_named_stream '%s'", name);
+
+ /* Streams can't be deleted so don't need to hold the lock during search */
+ s = find_named_stream(cm, name);
+
+ pthread_mutex_lock(&cm->lock);
+ if (s != NULL) {
+ if (!open_stream_l(cm, s)) {
+ s = NULL;
+ }
+ }
+ pthread_mutex_unlock(&cm->lock);
+
+ if (s != NULL) {
+ ALOGV("-get_named_stream =%p (refcount=%d)", &s->info, s->ref_count );
+ return &s->info;
+ } else {
+ ALOGE("-get_named_stream no suitable stream" );
+ return NULL;
+ }
+}
+
+bool is_named_stream_defined(struct config_mgr *cm, const char *name)
+{
+ struct stream *s;
+
+ /* Streams can't be deleted so don't need to hold the lock during search */
+ s = find_named_stream(cm, name);
+
+ ALOGV("is_named_stream_defined '%s' = %d", name, (s != NULL));
+ return (s != NULL);
+}
+
+void release_stream( const struct hw_stream* stream )
+{
+ struct stream *s = (struct stream *)stream;
+
+ ALOGV("release_stream %p", stream );
+
+ if (s) {
+ pthread_mutex_lock(&s->cm->lock);
+ if (--s->ref_count == 0) {
+ /* Ensure all paths it was using are disabled */
+ apply_paths_to_devices_l(s->cm, s->current_devices,
+ e_path_id_off, s->disable_path);
+ apply_paths_to_global_l(s->cm, s->disable_path, e_path_id_off);
+ s->current_devices = 0;
+ }
+ pthread_mutex_unlock(&s->cm->lock);
+ }
+}
+
+uint32_t get_supported_output_devices( struct config_mgr *cm )
+{
+ const uint32_t d = cm->supported_output_devices;
+
+ ALOGV("get_supported_output_devices=0x%x", d);
+ return d;
+}
+
+uint32_t get_supported_input_devices( struct config_mgr *cm )
+{
+ const uint32_t d = cm->supported_input_devices;
+
+ ALOGV("get_supported_input_devices=0x%x", d);
+ return d;
+}
+
+/*********************************************************************
+ * Use-case control
+ *********************************************************************/
+
+/* Apply the mixer settings for the case 'case_name' of the use-case
+ * 'setting' on the given stream.
+ * Returns 0 on success, -ENOSYS if the stream does not define that
+ * use-case/case combination.
+ */
+int apply_use_case( const struct hw_stream* stream,
+                    const char *setting,
+                    const char *case_name)
+{
+    struct stream *s = (struct stream *)stream;
+    struct usecase *uc = s->usecase_array.usecases;
+    int uc_remaining = s->usecase_array.count;
+    struct scase *sc;
+    int case_remaining;
+
+    ALOGV("apply_use_case(%p) %s=%s", stream, setting, case_name);
+
+    while (uc_remaining-- > 0) {
+        if (0 == strcmp(uc->name, setting)) {
+            sc = uc->case_array.cases;
+            case_remaining = uc->case_array.count;
+            while (case_remaining-- > 0) {
+                if (0 == strcmp(sc->name, case_name)) {
+                    pthread_mutex_lock(&s->cm->lock);
+                    apply_ctls_l(sc->ctl_array.ctls, sc->ctl_array.count);
+                    pthread_mutex_unlock(&s->cm->lock);
+                    return 0;
+                }
+                ++sc;
+            }
+        }
+        ++uc;
+    }
+
+    return -ENOSYS; /* use-case not implemented */
+}
+
+/*********************************************************************
+ * Config file parsing
+ *
+ * To keep this simple we restrict the order that config file entries
+ * may appear:
+ * - The <mixer> section must always appear first
+ * - Paths must be defined before they can be referred to
+ *********************************************************************/
+static int parse_mixer_start(struct parse_state *state);
+static int parse_device_start(struct parse_state *state);
+static int parse_device_end(struct parse_state *state);
+static int parse_stream_start(struct parse_state *state);
+static int parse_stream_end(struct parse_state *state);
+static int parse_stream_ctl_start(struct parse_state *state);
+static int parse_path_start(struct parse_state *state);
+static int parse_path_end(struct parse_state *state);
+static int parse_case_start(struct parse_state *state);
+static int parse_case_end(struct parse_state *state);
+static int parse_usecase_start(struct parse_state *state);
+static int parse_usecase_end(struct parse_state *state);
+static int parse_enable_start(struct parse_state *state);
+static int parse_disable_start(struct parse_state *state);
+static int parse_ctl_start(struct parse_state *state);
+static int parse_init_start(struct parse_state *state);
+
+/* Table describing every XML element the config file may contain:
+ * which attributes are valid and required, which child elements may
+ * nest inside it, and the handlers invoked on the start and end tags.
+ * Indexed by the e_elem_* enum. Note <ctl> appears twice: e_elem_ctl
+ * inside paths/cases and e_elem_stream_ctl inside <stream>, with
+ * different attribute sets and handlers.
+ */
+static const struct parse_element elem_table[e_elem_count] = {
+ [e_elem_ctl] = {
+ .name = "ctl",
+ .valid_attribs = BIT(e_attrib_name) | BIT(e_attrib_val) | BIT(e_attrib_index),
+ .required_attribs = BIT(e_attrib_name) | BIT(e_attrib_val),
+ .valid_subelem = 0,
+ .start_fn = parse_ctl_start,
+ .end_fn = NULL
+ },
+
+ [e_elem_path] = {
+ .name = "path",
+ .valid_attribs = BIT(e_attrib_name),
+ .required_attribs = BIT(e_attrib_name),
+ .valid_subelem = BIT(e_elem_ctl),
+ .start_fn = parse_path_start,
+ .end_fn = parse_path_end
+ },
+
+ [e_elem_device] = {
+ .name = "device",
+ .valid_attribs = BIT(e_attrib_name),
+ .required_attribs = BIT(e_attrib_name),
+ .valid_subelem = BIT(e_elem_path),
+ .start_fn = parse_device_start,
+ .end_fn = parse_device_end
+ },
+
+ [e_elem_stream] = {
+ .name = "stream",
+ .valid_attribs = BIT(e_attrib_name) | BIT(e_attrib_type)
+ | BIT(e_attrib_dir) | BIT(e_attrib_card)
+ | BIT(e_attrib_device) | BIT(e_attrib_instances)
+ | BIT(e_attrib_rate) | BIT(e_attrib_period_size)
+ | BIT(e_attrib_period_count),
+ .required_attribs = BIT(e_attrib_type),
+ .valid_subelem = BIT(e_elem_stream_ctl)
+ | BIT(e_elem_enable) | BIT(e_elem_disable)
+ | BIT(e_elem_usecase),
+ .start_fn = parse_stream_start,
+ .end_fn = parse_stream_end
+ },
+
+ [e_elem_enable] = {
+ .name = "enable",
+ .valid_attribs = BIT(e_attrib_path),
+ .required_attribs = BIT(e_attrib_path),
+ .valid_subelem = 0,
+ .start_fn = parse_enable_start,
+ .end_fn = NULL
+ },
+
+ [e_elem_disable] = {
+ .name = "disable",
+ .valid_attribs = BIT(e_attrib_path),
+ .required_attribs = BIT(e_attrib_path),
+ .valid_subelem = 0,
+ .start_fn = parse_disable_start,
+ .end_fn = NULL
+ },
+
+ [e_elem_case] = {
+ .name = "case",
+ .valid_attribs = BIT(e_attrib_name),
+ .required_attribs = BIT(e_attrib_name),
+ .valid_subelem = BIT(e_elem_ctl),
+ .start_fn = parse_case_start,
+ .end_fn = parse_case_end
+ },
+
+ [e_elem_usecase] = {
+ .name = "usecase",
+ .valid_attribs = BIT(e_attrib_name),
+ .required_attribs = BIT(e_attrib_name),
+ .valid_subelem = BIT(e_elem_case),
+ .start_fn = parse_usecase_start,
+ .end_fn = parse_usecase_end
+ },
+
+ [e_elem_stream_ctl] = {
+ .name = "ctl",
+ .valid_attribs = BIT(e_attrib_name) | BIT(e_attrib_function)
+ | BIT(e_attrib_index)
+ | BIT(e_attrib_min) | BIT(e_attrib_max),
+ .required_attribs = BIT(e_attrib_name) | BIT(e_attrib_function),
+ .valid_subelem = 0,
+ .start_fn = parse_stream_ctl_start,
+ .end_fn = NULL
+ },
+
+ [e_elem_init] = {
+ .name = "init",
+ .valid_attribs = 0,
+ .required_attribs = 0,
+ .valid_subelem = BIT(e_elem_ctl),
+ .start_fn = parse_init_start,
+ .end_fn = NULL
+ },
+
+ [e_elem_mixer] = {
+ .name = "mixer",
+ .valid_attribs = BIT(e_attrib_card),
+ .required_attribs = 0,
+ .valid_subelem = BIT(e_elem_init),
+ .start_fn = parse_mixer_start,
+ .end_fn = NULL
+ },
+
+ [e_elem_audiohal] = {
+ .name = "audiohal",
+ .valid_attribs = 0,
+ .required_attribs = 0,
+ .valid_subelem = BIT(e_elem_mixer),
+ .start_fn = NULL,
+ .end_fn = NULL
+ }
+};
+
+/* XML attribute names, indexed by the e_attrib_* enum used in the
+ * valid/required attribute bitmasks of elem_table. */
+static const struct parse_attrib attrib_table[e_attrib_count] = {
+ [e_attrib_name] = {"name"},
+ [e_attrib_val] = {"val"},
+ [e_attrib_path] = {"path"},
+ [e_attrib_function] = {"function"},
+ [e_attrib_type] = {"type"},
+ [e_attrib_index] = {"index"},
+ [e_attrib_dir] = {"dir"},
+ [e_attrib_card] = {"card"},
+ [e_attrib_device] = {"device"},
+ [e_attrib_instances] = {"instances"},
+ [e_attrib_rate] = {"rate"},
+ [e_attrib_period_size] = {"period_size"},
+ [e_attrib_period_count] = {"period_count"},
+ [e_attrib_min] = {"min"},
+ [e_attrib_max] = {"max"}
+ };
+
+/* Mapping from <device name="..."> strings to Android audio device
+ * bitflags. The "global" entry (flag 0) holds settings not tied to a
+ * physical device. */
+static const struct parse_device device_table[] = {
+ {"global", 0}, /* special dummy device for global settings */
+ {"speaker", AUDIO_DEVICE_OUT_SPEAKER},
+ {"earpiece", AUDIO_DEVICE_OUT_EARPIECE},
+ {"headset", AUDIO_DEVICE_OUT_WIRED_HEADSET},
+ {"headset_in", AUDIO_DEVICE_IN_WIRED_HEADSET},
+ {"headphone", AUDIO_DEVICE_OUT_WIRED_HEADPHONE},
+ {"sco", AUDIO_DEVICE_OUT_ALL_SCO},
+ {"sco_in", AUDIO_DEVICE_IN_ALL_SCO},
+ {"a2dp", AUDIO_DEVICE_OUT_ALL_A2DP},
+ {"usb", AUDIO_DEVICE_OUT_ALL_USB},
+ {"mic", AUDIO_DEVICE_IN_BUILTIN_MIC},
+ {"back mic", AUDIO_DEVICE_IN_BACK_MIC},
+ {"voice", AUDIO_DEVICE_IN_VOICE_CALL},
+ {"aux", AUDIO_DEVICE_IN_AUX_DIGITAL}
+};
+
+/* Path names that always exist, interned first so their ids are the
+ * e_path_id_off/e_path_id_on constants. */
+static const char *predefined_path_name_table[] = {
+ [e_path_id_off] = "off",
+ [e_path_id_on] = "on"
+};
+
+/* Grow 'array' by one element. Storage is reallocated in
+ * DYN_ARRAY_GRANULE-sized steps and newly allocated space is
+ * zero-filled, so callers receive cleared elements.
+ * Returns 0 on success, -ENOMEM on allocation failure or if the
+ * capacity would exceed the 0xFFFF element cap.
+ */
+static int dyn_array_extend(struct dyn_array *array)
+{
+ const uint elem_size = array->elem_size;
+ const uint new_count = array->count + 1;
+ uint max_count = array->max_count;
+ uint old_size, new_size;
+ void *p;
+ uint8_t *pbyte;
+
+ if (new_count > max_count) {
+ /* refuse to grow past the cap (also avoids size overflow) */
+ if (max_count > 0xFFFF - DYN_ARRAY_GRANULE) {
+ return -ENOMEM;
+ }
+
+ old_size = max_count * elem_size;
+ max_count += DYN_ARRAY_GRANULE;
+ new_size = max_count * elem_size;
+
+ p = realloc(array->data, new_size);
+ if (!p) {
+ return -ENOMEM;
+ }
+
+ /* zero the newly added tail of the allocation */
+ pbyte = p;
+ memset(pbyte + old_size, 0, new_size - old_size);
+
+ array->data = p;
+ array->max_count = max_count;
+ }
+
+ array->count = new_count;
+ return 0;
+}
+
+static void dyn_array_fix(struct dyn_array *array)
+{
+ /* Fixes the allocated memory to exactly the required length
+ * This will always be a shrink, discarding granular allocations
+ * that we don't need */
+ const uint size = array->count * array->elem_size;
+ void *p = realloc(array->data, size);
+
+ /* on realloc failure the original (larger) block is kept unchanged */
+ if (p)
+ {
+ array->data = p;
+ array->max_count = array->count;
+ }
+}
+
+/* Release the storage owned by 'array' (not the array struct itself) */
+static void dyn_array_free(struct dyn_array *array)
+{
+ free(array->data);
+}
+
+/* Append a new ctl entry bound to mixer control 'ctl'. The index is
+ * initialized to INVALID_CTL_INDEX until a value is parsed.
+ * Returns the new entry, or NULL if the array could not be grown.
+ */
+static struct ctl* new_ctl(struct dyn_array *array, struct mixer_ctl *ctl)
+{
+ struct ctl *c;
+
+ if (dyn_array_extend(array) < 0) {
+ return NULL;
+ }
+
+ c = &array->ctls[array->count - 1];
+ c->index = INVALID_CTL_INDEX;
+ c->ctl = ctl;
+ return c;
+}
+
+/* Placeholder for symmetry with the other compress_* helpers:
+ * a ctl entry owns no sub-array that could be trimmed. */
+static void compress_ctl(struct ctl *ctl)
+{
+}
+
+/* Append a blank path with the given id to 'array'.
+ * Returns the new element, or NULL if the array could not be grown. */
+static struct path* new_path(struct dyn_array *array, int id)
+{
+    struct path *p;
+
+    if (dyn_array_extend(array) < 0) {
+        return NULL;
+    }
+
+    p = &array->paths[array->count - 1];
+    p->ctl_array.elem_size = sizeof(struct ctl);
+    p->id = id;
+    return p;
+}
+
+/* Trim a fully-parsed path's ctl array to its final size. */
+static void compress_path(struct path *path)
+{
+    dyn_array_fix(&path->ctl_array);
+}
+
+/* Append a blank case named 'name' to 'array'.
+ * Returns the new element, or NULL if the array could not be grown. */
+static struct scase* new_case(struct dyn_array *array, const char *name)
+{
+    struct scase *entry;
+
+    if (dyn_array_extend(array) < 0) {
+        return NULL;
+    }
+
+    entry = &array->cases[array->count - 1];
+    entry->ctl_array.elem_size = sizeof(struct ctl);
+    entry->name = name;
+    return entry;
+}
+
+/* Trim a fully-parsed case's ctl array to its final size. */
+static void compress_case(struct scase *sc)
+{
+    dyn_array_fix(&sc->ctl_array);
+}
+
+/* Append a blank usecase named 'name' to 'array'.
+ * Returns the new element, or NULL if the array could not be grown. */
+static struct usecase* new_usecase(struct dyn_array *array, const char *name)
+{
+    struct usecase *entry;
+
+    if (dyn_array_extend(array) < 0) {
+        return NULL;
+    }
+
+    entry = &array->usecases[array->count - 1];
+    entry->case_array.elem_size = sizeof(struct scase);
+    entry->name = name;
+    return entry;
+}
+
+/* Trim a fully-parsed usecase's case array to its final size. */
+static void compress_usecase(struct usecase *puc)
+{
+    dyn_array_fix(&puc->case_array);
+}
+
+/* Append a blank device with bitflag 'type' to 'array'.
+ * Returns the new element, or NULL if the array could not be grown. */
+static struct device* new_device(struct dyn_array *array, uint32_t type)
+{
+    struct device *dev;
+
+    if (dyn_array_extend(array) < 0) {
+        return NULL;
+    }
+
+    dev = &array->devices[array->count - 1];
+    dev->path_array.elem_size = sizeof(struct path);
+    dev->type = type;
+    return dev;
+}
+
+/* Trim a fully-parsed device's path array to its final size. */
+static void compress_device(struct device *d)
+{
+    dyn_array_fix(&d->path_array);
+}
+
+/* Append a blank stream owned by config manager 'cm' to 'array'.
+ * Returns the new element, or NULL if the array could not be grown. */
+static struct stream* new_stream(struct dyn_array *array, struct config_mgr *cm)
+{
+    struct stream *entry;
+
+    if (dyn_array_extend(array) < 0) {
+        return NULL;
+    }
+
+    entry = &array->streams[array->count - 1];
+    entry->usecase_array.elem_size = sizeof(struct usecase);
+    entry->cm = cm;
+    entry->enable_path = -1; /* by default no special path to invoke */
+    entry->disable_path = -1;
+    return entry;
+}
+
+/* Trim a fully-parsed stream's usecase array to its final size. */
+static void compress_stream(struct stream *s)
+{
+    dyn_array_fix(&s->usecase_array);
+}
+
+/* Append 'name' to the path-name array.
+ * Returns the new entry's index, or -ENOMEM on failure. */
+static int new_name(struct dyn_array *array, const char* name)
+{
+    int idx;
+
+    if (dyn_array_extend(array) < 0) {
+        return -ENOMEM;
+    }
+
+    idx = array->count - 1;
+    array->path_names[idx] = name;
+    return idx;
+}
+
+/* Allocate and initialise an empty config manager.
+ * Returns NULL on allocation failure. */
+static struct config_mgr* new_config_mgr()
+{
+    struct config_mgr* mgr = calloc(1, sizeof(struct config_mgr));
+
+    if (mgr) {
+        mgr->device_array.elem_size = sizeof(struct device);
+        mgr->stream_array.elem_size = sizeof(struct stream);
+        pthread_mutex_init(&mgr->lock, NULL);
+    }
+    return mgr;
+}
+
+/* Trim the top-level device and stream arrays to their final sizes. */
+static void compress_config_mgr(struct config_mgr *mgr)
+{
+    dyn_array_fix(&mgr->device_array);
+    dyn_array_fix(&mgr->stream_array);
+}
+
+/* Look up 'name' in the interned path-name table.
+ * Returns its index, or -EINVAL if not present. */
+static int find_path_name(struct parse_state *state, const char *name)
+{
+    struct dyn_array *array = &state->path_name_array;
+    int i = array->count;
+
+    while (--i >= 0) {
+        if (0 == strcmp(array->path_names[i], name)) {
+            ALOGV("Existing path '%s' id=%d", name, i);
+            return i; /* found - return existing index */
+        }
+    }
+    return -EINVAL;
+}
+
+/* Return the id of path name 'name', interning a private copy of it in
+ * the path-name table if it is not already present.
+ * Returns the (non-negative) path id, or -ENOMEM on allocation failure.
+ */
+static int add_path_name(struct parse_state *state, const char *name)
+{
+    struct dyn_array *array = &state->path_name_array;
+    int index;
+    const char *s;
+
+    /* Check if already in array */
+    index = find_path_name(state, name);
+    if (index >= 0) {
+        return index; /* already exists */
+    }
+
+    s = strdup(name);
+    if (s == NULL) {
+        return -ENOMEM;
+    }
+
+    index = new_name(array, s);
+    if (index < 0) {
+        free((void *)s); /* BUGFIX: the copy was leaked on this path */
+        return -ENOMEM;
+    }
+
+    ALOGV("New path '%s' id=%d", name, index);
+    return index;
+}
+
+/* Free every interned path-name string and the table's storage. */
+static void path_names_free(struct parse_state *state)
+{
+    struct dyn_array *array = &state->path_name_array;
+    int i = array->count;
+
+    while (--i >= 0) {
+        free((void*)array->path_names[i]);
+    }
+    dyn_array_free(array);
+}
+
+/* Convert 'str' (decimal, octal or 0x-prefixed hex) to an unsigned int.
+ * The whole string must be a number no larger than INT_MAX.
+ * Returns 0 and stores the value in *result on success; -ENOENT if str
+ * is NULL (attribute absent); -EINVAL if it is not a valid number.
+ */
+static int string_to_uint(uint32_t *result, const char *str)
+{
+ char *endptr;
+ unsigned long int v;
+
+ if (!str) {
+ return -ENOENT;
+ }
+
+ /* return error if not a valid decimal or hex number */
+ v = strtoul(str, &endptr, 0);
+ if ((endptr[0] == '\0') && (endptr != str) && (v <= INT_MAX)) {
+ *result = (uint32_t)v;
+ return 0;
+ } else {
+ ALOGE("'%s' not a valid number", str);
+ return -EINVAL;
+ }
+}
+
+/* Convert the attribute at 'index' to an unsigned int via
+ * string_to_uint(); -ENOENT means the attribute was not supplied. */
+static int attrib_to_uint(uint32_t *result, struct parse_state *state,
+ enum attrib_index index)
+{
+ const char *str = state->attribs.value[index];
+ return string_to_uint(result, str);
+}
+
+/* Parse the comma-separated "val" attribute of a <ctl> element into a
+ * byte array for a MIXER_CTL_TYPE_BYTE control. c->index gives the
+ * offset of the first byte within the control; the parsed bytes and
+ * their count are stored in c->value.data and c->array_count.
+ * Returns 0 on success or negative errno; on failure any partially
+ * allocated data is released.
+ */
+static int make_byte_array(struct parse_state *state, struct ctl *c)
+{
+    char *str = strdup(state->attribs.value[e_attrib_val]);
+    const unsigned int vnum = mixer_ctl_get_num_values(c->ctl);
+    uint8_t *bytes;
+    int count;
+    char *p;
+    uint32_t v;
+    int ret;
+
+    if (!str) {
+        ret = -ENOMEM;
+        goto fail;
+    }
+
+    if (vnum > BYTE_ARRAY_MAX_SIZE) {
+        ALOGE("Byte array control too big(%u)", vnum);
+        ret = -EINVAL; /* BUGFIX: was a bare return that leaked str */
+        goto fail;
+    }
+
+    if (c->index >= vnum) {
+        ALOGE("Control index out of range(%u>%u)", c->index, vnum);
+        ret = -EINVAL;
+        goto fail;
+    }
+
+    /* get number of entries in value string by counting commas */
+    p = strtok(str, ",");
+    for (count = 0; p != NULL; count++) {
+        p = strtok(NULL, ",");
+    }
+
+    if ((c->index + count) > vnum) {
+        ALOGE("Array overflows control (%u+%u > %u)",
+              c->index, count, vnum);
+        ret = -EINVAL;
+        goto fail;
+    }
+    c->array_count = count;
+
+    bytes = malloc(count);
+    if (!bytes) {
+        ALOGE("Out of memory for control data");
+        ret = -ENOMEM;
+        goto fail;
+    }
+    c->value.data = bytes;
+
+    /* strtok wrote NULs into str - restore it and re-scan, this time
+     * converting each entry */
+    strcpy(str,state->attribs.value[e_attrib_val]);
+
+    for (p = strtok(str, ","); p != NULL;) {
+        ret = string_to_uint(&v, p);
+        if (ret != 0) {
+            goto fail;
+        }
+        ALOGE_IF(v > 0xFF, "Byte out of range");
+
+        *bytes++ = (uint8_t)v;
+        p = strtok(NULL, ",");
+    }
+
+    free(str);
+    return 0;
+
+fail:
+    free((void *)c->value.data);
+    c->value.data = NULL; /* BUGFIX: prevent a later double-free of the
+                           * stale pointer during config teardown */
+    free(str);
+    return ret;
+}
+
+/* Map a device name from the config file to its device_table entry.
+ * Returns NULL if the name is not recognised. */
+static const struct parse_device *parse_match_device(const char *name)
+{
+    const unsigned int n = sizeof(device_table) / sizeof(device_table[0]);
+    unsigned int i;
+
+    for (i = 0; i < n; ++i) {
+        if (0 == strcmp(name, device_table[i].name)) {
+            return &device_table[i];
+        }
+    }
+
+    return NULL;
+}
+
+/* Map a device bitflag back to its config-file name, for logging.
+ * Returns "unknown" if no entry matches. */
+static const char *debug_device_to_name(uint32_t device)
+{
+    const unsigned int n = sizeof(device_table) / sizeof(device_table[0]);
+    unsigned int i;
+
+    /* Assumes device contains a single bitflag plus direction bit */
+    for (i = 0; i < n; ++i) {
+        if (device == device_table[i].device) {
+            return device_table[i].name;
+        }
+    }
+
+    return "unknown";
+}
+
+
+/* Start handler for a <ctl> element inside a <path>, <case> or <init>.
+ * Looks up the named mixer control, appends a ctl entry to the current
+ * path's or case's ctl array and parses the "val" attribute according
+ * to the control type (byte array, bool/int, or enum).
+ * Returns 0 on success or negative errno.
+ */
+static int parse_ctl_start(struct parse_state *state)
+{
+    const char *name = state->attribs.value[e_attrib_name];
+    struct dyn_array *array;
+    struct ctl *c;
+    struct mixer_ctl *ctl;
+    enum mixer_ctl_type ctl_type;
+    int ret;
+
+    /* BUGFIX: removed unused local 'index' (build uses -Werror) */
+
+    if (state->current.path) {
+        ALOGV("parse_ctl_start:path ctl");
+        array = &state->current.path->ctl_array;
+    } else {
+        ALOGV("parse_ctl_start:case ctl");
+        array = &state->current.scase->ctl_array;
+    }
+
+    ctl = mixer_get_ctl_by_name(state->cm->mixer, name);
+    if (!ctl) {
+        ALOGE("Control '%s' not found", name);
+        return -EINVAL;
+    }
+
+    c = new_ctl(array, ctl);
+    if (c == NULL) {
+        return -ENOMEM;
+    }
+
+    /* "index" is optional; c->index stays INVALID_CTL_INDEX if absent */
+    if (attrib_to_uint(&c->index, state, e_attrib_index) == -EINVAL) {
+        ALOGE("Invalid ctl index");
+        return -EINVAL;
+    }
+
+    ctl_type = mixer_ctl_get_type(ctl);
+    switch(ctl_type)
+    {
+    case MIXER_CTL_TYPE_BYTE:
+        if (c->index == INVALID_CTL_INDEX) {
+            c->index = 0;
+        }
+        ret = make_byte_array(state, c);
+        if (ret != 0) {
+            return ret;
+        }
+        ALOGV("Added ctl '%s' byte array", name);
+        break;
+
+    case MIXER_CTL_TYPE_BOOL:
+    case MIXER_CTL_TYPE_INT:
+        if (attrib_to_uint(&c->value.uinteger, state, e_attrib_val)
+            == -EINVAL) {
+            return -EINVAL;
+        }
+        /* This log statement is just to aid to debugging */
+        ALOGE_IF((ctl_type == MIXER_CTL_TYPE_BOOL)
+                 && (c->value.uinteger > 1),
+                 "WARNING: Illegal value for bool control");
+        ALOGV("Added ctl '%s' value %u", name, c->value.uinteger);
+        break;
+
+    case MIXER_CTL_TYPE_ENUM:
+        /* enum values are applied by name; keep a private copy */
+        c->value.name = strdup(state->attribs.value[e_attrib_val]);
+        if (!c->value.name) {
+            return -ENOMEM;
+        }
+        ALOGV("Added ctl '%s' value '%s'", name, c->value.name);
+        break;
+
+    case MIXER_CTL_TYPE_IEC958:
+    case MIXER_CTL_TYPE_INT64:
+    case MIXER_CTL_TYPE_UNKNOWN:
+    default:
+        ALOGE("Mixer control '%s' has unsupported type", name);
+        return -EINVAL;
+    };
+
+    return 0;
+}
+
+/* Start handler for the <init> element inside <mixer>. Always succeeds. */
+static int parse_init_start(struct parse_state *state)
+{
+ /* The <init> section inside <mixer> is really just a
+ * path that we only use once. We re-use the parsing of
+ * <ctl> entries by creating a temporary path which we
+ * apply at the end of parsing and then discard
+ */
+ state->current.path = &state->init_path;
+
+ ALOGV("Added init path");
+ return 0;
+}
+
+/* Start handler for a <path> element inside a <device>. Interns the
+ * path name (so the same name shares an id across devices), appends a
+ * new path to the current device and makes it the target for
+ * subsequent <ctl> elements.
+ * Returns 0 on success or negative errno.
+ */
+static int parse_path_start(struct parse_state *state)
+{
+ const char *name = state->attribs.value[e_attrib_name];
+ struct device *device = state->current.device;
+ struct dyn_array *array = &device->path_array;
+ struct path *path;
+ int id;
+
+ id = add_path_name(state, name);
+ if (id < 0) {
+ return id;
+ }
+
+ path = new_path(array, id);
+ if (path == NULL) {
+ return -ENOMEM;
+ }
+
+ state->current.path = path;
+
+ ALOGV("Added path '%s' id=%d", name, id);
+ return 0;
+}
+
+/* End handler for </path>: trim the ctl array and clear the
+ * current-path pointer. */
+static int parse_path_end(struct parse_state *state)
+{
+ /* Free unused memory in the ctl array */
+ compress_path(state->current.path);
+ state->current.path = NULL;
+ return 0;
+}
+
+/* Start handler for a <case> element inside a <usecase>. Adds a new
+ * case (holding a private copy of its name) to the current usecase and
+ * makes it the target for subsequent <ctl> elements.
+ * Returns 0 on success or -ENOMEM.
+ */
+static int parse_case_start(struct parse_state *state)
+{
+    const char *name = strdup(state->attribs.value[e_attrib_name]);
+    struct usecase *puc = state->current.usecase;
+    struct dyn_array *array = &puc->case_array;
+    struct scase *sc;
+
+    if (!name) {
+        return -ENOMEM;
+    }
+
+    sc = new_case(array, name);
+    if (sc == NULL) {
+        free((void *)name); /* BUGFIX: the copied name was leaked here */
+        return -ENOMEM;
+    }
+
+    state->current.scase = sc;
+
+    ALOGV("Added case '%s' to '%s'", name, puc->name);
+    return 0;
+}
+
+/* End handler for </case>: trim the ctl array and clear the
+ * current-case pointer. */
+static int parse_case_end(struct parse_state *state)
+{
+    /* Free unused memory in the ctl array */
+    compress_case(state->current.scase);
+    state->current.scase = NULL;
+    return 0;
+}
+
+/* Start handler for a <usecase> element inside a <stream>. Adds a new
+ * usecase (holding a private copy of its name) to the current stream
+ * and makes it the target for subsequent <case> elements.
+ * Returns 0 on success or -ENOMEM.
+ */
+static int parse_usecase_start(struct parse_state *state)
+{
+    const char *name = strdup(state->attribs.value[e_attrib_name]);
+    struct dyn_array *array = &state->current.stream->usecase_array;
+    struct usecase *puc;
+
+    if (!name) {
+        return -ENOMEM;
+    }
+
+    puc = new_usecase(array, name);
+    if (puc == NULL) {
+        free((void *)name); /* BUGFIX: the copied name was leaked here */
+        return -ENOMEM;
+    }
+
+    state->current.usecase = puc;
+
+    ALOGV("Added usecase '%s'", name);
+
+    return 0;
+}
+
+/* End handler for </usecase>: trim the case array. */
+static int parse_usecase_end(struct parse_state *state)
+{
+    /* Free unused memory in the case array */
+    compress_usecase(state->current.usecase);
+    return 0;
+}
+
+/* Record the path named in an <enable> or <disable> element as the
+ * current stream's enable_path or disable_path. The path must already
+ * have been defined by some <device>.
+ * Returns 0 on success, -EINVAL if the path name is unknown.
+ */
+static int parse_enable_disable_start(struct parse_state *state, bool is_enable)
+{
+ /* Handling of <enable> and <disable> is almost identical so
+ * they are both handled in this function
+ */
+
+ const char *path_name = state->attribs.value[e_attrib_path];
+ int i;
+
+ i = find_path_name(state,path_name);
+ if (i < 0) {
+ ALOGE("Path '%s' not defined", path_name);
+ return -EINVAL;
+ }
+
+ if (is_enable) {
+ ALOGV("Add enable path '%s' (id=%d)",
+ state->path_name_array.path_names[i], i);
+ state->current.stream->enable_path = i;
+ } else {
+ ALOGV("Add disable path '%s' (id=%d)",
+ state->path_name_array.path_names[i], i);
+ state->current.stream->disable_path = i;
+ }
+
+ return 0;
+}
+
+/* Start handler for an <enable> element */
+static int parse_enable_start(struct parse_state *state)
+{
+ return parse_enable_disable_start(state, true);
+}
+
+/* Start handler for a <disable> element */
+static int parse_disable_start(struct parse_state *state)
+{
+ return parse_enable_disable_start(state, false);
+}
+
+/* Start handler for a <ctl> element directly inside a <stream>.
+ * Binds a mixer control to a stream function ("leftvol"/"rightvol").
+ * Optional "index" selects a value slot within the control; optional
+ * "min"/"max" override the control's hardware range.
+ * Returns 0 on success or negative errno.
+ */
+static int parse_stream_ctl_start(struct parse_state *state)
+{
+ /* Parse a <ctl> element within a stream which defines
+ * mixer controls - currently only supports volume controls
+ */
+
+ const char *name = state->attribs.value[e_attrib_name];
+ const char *function = state->attribs.value[e_attrib_function];
+ const char *index = state->attribs.value[e_attrib_index];
+ struct mixer_ctl *ctl;
+ struct stream_control *streamctl;
+ uint idx_val = 0;
+ uint32_t v;
+ int r;
+
+ ctl = mixer_get_ctl_by_name(state->cm->mixer, name);
+ if (!ctl) {
+ ALOGE("Control '%s' not found", name);
+ return -EINVAL;
+ }
+
+ if (index != NULL) {
+ if (attrib_to_uint(&idx_val, state, e_attrib_index) == -EINVAL) {
+ return -EINVAL;
+ }
+ }
+
+ if (0 == strcmp(function, "leftvol")) {
+ ALOGE_IF(state->current.stream->controls.volume_left.ctl,
+ "Left volume control specified again");
+ streamctl = &(state->current.stream->controls.volume_left);
+ } else if (0 == strcmp(function, "rightvol")) {
+ ALOGE_IF(state->current.stream->controls.volume_right.ctl,
+ "Right volume control specified again");
+ streamctl = &(state->current.stream->controls.volume_right);
+ } else {
+ ALOGE("'%s' is not a valid control function", function);
+ return -EINVAL;
+ }
+
+ streamctl->ctl = ctl;
+ streamctl->id = idx_val;
+
+ /* "min": explicit value, or fall back to the control's own minimum */
+ switch (attrib_to_uint(&v, state, e_attrib_min)) {
+ case -EINVAL:
+ return -EINVAL;
+
+ case -ENOENT:
+ /* Not specified, get control's min value */
+ r = mixer_ctl_get_range_min(ctl);
+ if (r < 0) {
+ ALOGE("Failed to get control min");
+ return r;
+ }
+ streamctl->min = (uint)r;
+ break;
+
+ default:
+ streamctl->min = v;
+ break;
+ }
+
+ /* "max": explicit value, or fall back to the control's own maximum */
+ switch (attrib_to_uint(&v, state, e_attrib_max)) {
+ case -EINVAL:
+ return -EINVAL;
+
+ case -ENOENT:
+ /* Not specified, get control's max value */
+ r = mixer_ctl_get_range_max(ctl);
+ if (r < 0) {
+ ALOGE("Failed to get control max");
+ return r;
+ }
+ streamctl->max = (uint)r;
+ break;
+
+ default:
+ streamctl->max = v;
+ break;
+ }
+
+ ALOGV("Added control '%s' function '%s' range %u-%u", name, function,
+ streamctl->min, streamctl->max);
+
+ return 0;
+}
+
+/* Start handler for a <stream> element. Creates a new stream entry and
+ * populates it from the attributes: type (pcm/compress/hw), dir
+ * (in/out), card/device numbers, instance limit, rate and period
+ * settings. Returns 0 on success or negative errno.
+ */
+static int parse_stream_start(struct parse_state *state)
+{
+    const char *type = state->attribs.value[e_attrib_type];
+    const char *dir = state->attribs.value[e_attrib_dir];
+    const char *name = state->attribs.value[e_attrib_name];
+    bool out;
+    uint32_t card = 0;   /* BUGFIX: was uninitialized for type "hw" */
+    uint32_t device = 0; /* BUGFIX: was uninitialized for type "hw" */
+    uint32_t maxref = INT_MAX;
+    struct stream *s;
+
+    if (name != NULL) {
+        name = strdup(name);
+        if (name == NULL) {
+            return -ENOMEM;
+        }
+
+        if (find_named_stream(state->cm, name) != NULL) {
+            ALOGE("Stream '%s' already declared", name);
+            free((void *)name); /* BUGFIX: was leaked */
+            return -EINVAL;
+        }
+    }
+
+    s = new_stream(&state->cm->stream_array, state->cm);
+    if (s == NULL) {
+        free((void *)name); /* BUGFIX: was leaked */
+        return -ENOMEM;
+    }
+
+    /* Attach the name immediately so the stream owns it on all paths */
+    s->name = name;
+
+    if (0 == strcmp(type, "hw")) {
+        if (name == NULL) {
+            ALOGE("Anonymous stream cannot be type hw");
+            return -EINVAL;
+        }
+        s->info.type = e_stream_hardware;
+    } else {
+        if (dir == NULL) {
+            ALOGE("dir tag missing");
+            return -EINVAL;
+        }
+
+        if (0 == strcmp(dir, "out")) {
+            out = true;
+        } else if (0 == strcmp(dir, "in")) {
+            out = false;
+        } else {
+            ALOGE("'%s' is not a valid direction", dir);
+            return -EINVAL;
+        }
+
+        if (0 == strcmp(type, "pcm")) {
+            s->info.type = out ? e_stream_out_pcm : e_stream_in_pcm;
+            card = PCM_CARD_DEFAULT;
+            device = PCM_DEVICE_DEFAULT;
+        } else if (0 == strcmp(type, "compress")) {
+            s->info.type = out ? e_stream_out_compress : e_stream_in_compress;
+            card = COMPRESS_CARD_DEFAULT;
+            device = COMPRESS_DEVICE_DEFAULT;
+        } else {
+            ALOGE("'%s' not a valid stream type", type);
+            return -EINVAL;
+        }
+    }
+
+    /* Explicit attributes override the per-type defaults above */
+    if (attrib_to_uint(&card, state, e_attrib_card) == -EINVAL) {
+        return -EINVAL;
+    }
+
+    if (attrib_to_uint(&device, state, e_attrib_device) == -EINVAL) {
+        return -EINVAL;
+    }
+
+    if (attrib_to_uint(&maxref, state, e_attrib_instances) == -EINVAL) {
+        return -EINVAL;
+    }
+
+    if (attrib_to_uint(&s->info.rate, state, e_attrib_rate) == -EINVAL) {
+        return -EINVAL;
+    }
+
+    if (attrib_to_uint(&s->info.period_count, state,
+                       e_attrib_period_count) == -EINVAL) {
+        return -EINVAL;
+    }
+
+    /* BUGFIX: the period_size attribute was being stored into
+     * period_count, clobbering it and leaving period_size unset */
+    if (attrib_to_uint(&s->info.period_size, state,
+                       e_attrib_period_size) == -EINVAL) {
+        return -EINVAL;
+    }
+
+    s->info.card_number = card;
+    s->info.device_number = device;
+    s->max_ref_count = maxref;
+
+    ALOGV("Added stream %s type=%u card=%u device=%u max_ref=%u",
+          s->name ? s->name : "",
+          s->info.type, s->info.card_number, s->info.device_number,
+          s->max_ref_count );
+
+    state->current.stream = s;
+
+    return 0;
+}
+
+/* End handler for </stream>: trim the stream's usecase array. */
+static int parse_stream_end(struct parse_state *state)
+{
+ /* Free unused memory in the ctl array */
+ compress_stream(state->current.stream);
+ return 0;
+}
+
+/* Start handler for a <device> element. Validates the device name,
+ * records its bitflag in the supported input or output device mask
+ * (except for the special "global" pseudo-device, flag 0), rejects
+ * duplicates and makes the new device current.
+ * Returns 0 on success or negative errno.
+ */
+static int parse_device_start(struct parse_state *state)
+{
+    const char *dev_name = state->attribs.value[e_attrib_name];
+    struct dyn_array *array = &state->cm->device_array;
+    uint32_t device_flag;
+    uint32_t *existing_devices;
+    const struct parse_device *p;
+    struct device* d;
+
+    p = parse_match_device(dev_name);
+
+    if (p == NULL) {
+        ALOGE("'%s' is not a valid device", dev_name);
+        return -EINVAL;
+    }
+
+    device_flag = p->device;
+
+    if (device_flag != 0) {
+        /* not the global device - add it to list of available devices */
+        if (device_flag & AUDIO_DEVICE_BIT_IN) {
+            existing_devices = &state->cm->supported_input_devices;
+        } else {
+            existing_devices = &state->cm->supported_output_devices;
+        }
+
+        if ((device_flag & *existing_devices) == device_flag) {
+            ALOGE("Device '%s' already defined", dev_name);
+            /* BUGFIX: corrected "extisting" typo in the log message */
+            ALOGE("Device = 0x%x existing_devices = 0x%x", device_flag, *existing_devices);
+            ALOGE("supported_output_devices=0x%x supported_input_devices=0x%x",
+                  state->cm->supported_output_devices,
+                  state->cm->supported_input_devices );
+            return -EINVAL;
+        }
+        *existing_devices |= device_flag;
+    }
+
+    ALOGV("Add device '%s'", dev_name);
+
+    d = new_device(array, device_flag);
+    if (d == NULL) {
+        return -ENOMEM;
+    }
+    state->current.device = d;
+
+    return 0;
+}
+
+/* End handler for </device>: trim the device's path array. */
+static int parse_device_end(struct parse_state *state)
+{
+ /* Free unused memory in the path array */
+ compress_device(state->current.device);
+ return 0;
+}
+
+/* Start handler for the <mixer> element, which must be the first child
+ * of <audiohal>. Opens the ALSA mixer for the given card (or the
+ * default card) and then unlocks the remaining root elements.
+ * Returns 0 on success, -EINVAL on bad card number or open failure.
+ */
+static int parse_mixer_start(struct parse_state *state)
+{
+ uint32_t card = MIXER_CARD_DEFAULT;
+
+ ALOGV("parse_mixer_start");
+
+ if (attrib_to_uint(&card, state, e_attrib_card) == -EINVAL) {
+ return -EINVAL;
+ }
+
+ ALOGV("Opening mixer card %u", card);
+
+ state->cm->mixer = mixer_open(card);
+
+ if (!state->cm->mixer) {
+ ALOGE("Failed to open mixer card %u", card);
+ return -EINVAL;
+ }
+
+ /* Now we can allow all other root elements but not another <mixer> */
+ state->stack.entry[state->stack.index - 1].valid_subelem =
+ BIT(e_elem_device)
+ | BIT(e_elem_stream);
+ return 0;
+}
+
+/* Record 'error' and the current config-file line for later reporting.
+ * Returns the error value for convenient chaining. */
+static int parse_set_error(struct parse_state *state, int error)
+{
+ state->parse_error = error;
+ state->error_line = XML_GetCurrentLineNumber(state->parser);
+ return error;
+}
+
+/* Log any pending handler error or expat parse error.
+ * Returns -EINVAL if either kind of error occurred, otherwise 0. */
+static int parse_log_error(struct parse_state *state)
+{
+ int err = state->parse_error;
+ int xml_err = XML_GetErrorCode(state->parser);
+
+ if((err < 0) || (xml_err != XML_ERROR_NONE)) {
+ ALOGE_IF(err < 0, "Error in config file at line %d", state->error_line);
+ ALOGE_IF(xml_err != XML_ERROR_NONE,
+ "Parse error '%s' in config file at line %u",
+ XML_ErrorString(xml_err),
+ (uint)XML_GetCurrentLineNumber(state->parser));
+ return -EINVAL;
+ } else {
+ return 0;
+ }
+}
+
+/* Match the expat attribute name/value pairs in state->attribs.all
+ * against the attributes valid for element 'elem_index', storing each
+ * value into state->attribs.value[] indexed by e_attrib_*.
+ * Returns 0 on success; -EINVAL if an attribute is not allowed here or
+ * a required attribute is missing (all missing ones are logged).
+ */
+static int extract_attribs(struct parse_state *state, int elem_index)
+{
+ const uint32_t valid_attribs = elem_table[elem_index].valid_attribs;
+ uint32_t required_attribs = elem_table[elem_index].required_attribs;
+ const XML_Char **attribs = state->attribs.all;
+ int i;
+
+ memset(&state->attribs.value, 0, sizeof(state->attribs.value));
+
+ /* attribs is a NULL-terminated list of name,value pairs */
+ while (attribs[0] != NULL) {
+ for (i = 0; i < e_attrib_count; ++i ) {
+ if ((BIT(i) & valid_attribs) != 0) {
+ if (0 == strcmp(attribs[0], attrib_table[i].name)) {
+ state->attribs.value[i] = attribs[1];
+ required_attribs &= ~BIT(i);
+ break;
+ }
+ }
+ }
+ if (i >= e_attrib_count) {
+ ALOGE("Attribute '%s' not allowed here", attribs[0] );
+ return -EINVAL;
+ }
+
+ attribs += 2;
+ }
+
+ if (required_attribs != 0) {
+ for (i = 0; i < e_attrib_count; ++i ) {
+ if ((required_attribs & BIT(i)) != 0) {
+ ALOGE("Attribute '%s' required", attrib_table[i].name);
+ }
+ }
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* expat start-element callback. Checks the element is valid at the
+ * current nesting level, pushes it onto the parse stack, extracts its
+ * attributes and invokes the element's start handler. Errors are
+ * latched via parse_set_error() and abort further processing.
+ */
+static void parse_section_start(void *data, const XML_Char *name,
+ const XML_Char **attribs)
+{
+ struct parse_state *state = (struct parse_state *)data;
+ int stack_index = state->stack.index;
+ const uint32_t valid_elems =
+ state->stack.entry[stack_index].valid_subelem;
+ int i;
+
+ /* once an error is latched, skip everything until parsing stops */
+ if (state->parse_error != 0) {
+ return;
+ }
+
+ ALOGV("parse start <%s>", name );
+
+ /* Find element in list of elements currently valid */
+ for (i = 0; i < e_elem_count; ++i) {
+ if ((BIT(i) & valid_elems) != 0) {
+ if (0 == strcmp(name, elem_table[i].name)) {
+ break;
+ }
+ }
+ }
+
+ if ((i >= e_elem_count) || (stack_index >= MAX_PARSE_DEPTH)) {
+ ALOGE("Element '%s' not allowed here", name);
+ parse_set_error(state, -EINVAL);
+ } else {
+ /* element ok - push onto stack */
+ ++stack_index;
+ state->stack.entry[stack_index].elem_index = i;
+ state->stack.entry[stack_index].valid_subelem
+ = elem_table[i].valid_subelem;
+ state->stack.index = stack_index;
+
+ /* Extract attributes and call handler */
+ state->attribs.all = attribs;
+ if (extract_attribs(state, i) != 0) {
+ parse_set_error(state, -EINVAL);
+ } else {
+ if (elem_table[i].start_fn) {
+ parse_set_error(state, (*elem_table[i].start_fn)(state));
+ }
+ }
+ }
+}
+
+/* expat end-element callback. Invokes the element's end handler (if
+ * any) and pops it off the parse stack. Skipped once an error has
+ * been latched.
+ */
+static void parse_section_end(void *data, const XML_Char *name)
+{
+ struct parse_state *state = (struct parse_state *)data;
+ const int i = state->stack.entry[state->stack.index].elem_index;
+
+ if (state->parse_error != 0) {
+ return;
+ }
+
+ ALOGV("parse end <%s>", name );
+
+ if (elem_table[i].end_fn) {
+ state->parse_error = (*elem_table[i].end_fn)(state);
+ }
+
+ --state->stack.index;
+}
+
+/* Feed the already-opened config file through expat chunk by chunk
+ * until EOF or until a handler/parse error is reported.
+ * Returns 0 on success, -EIO on read error, -EINVAL on parse error.
+ */
+static int do_parse(struct parse_state *state)
+{
+ bool eof = false;
+ int len;
+ int ret = 0;
+
+ state->parse_error = 0;
+ state->stack.index = 0;
+ /* First element must be <audiohal> */
+ state->stack.entry[0].valid_subelem = BIT(e_elem_audiohal);
+
+ while (!eof && (state->parse_error == 0)) {
+ len = fread(state->read_buf, 1, sizeof(state->read_buf), state->file);
+ if (ferror(state->file)) {
+ ALOGE("I/O error reading config file");
+ ret = -EIO;
+ break;
+ }
+
+ eof = feof(state->file);
+
+ /* errors are collected via parse_log_error, not the return value */
+ XML_Parse(state->parser, state->read_buf, len, eof);
+ if (parse_log_error(state) < 0) {
+ ret = -EINVAL;
+ break;
+ }
+ }
+
+ return ret;
+}
+
+/* Open /system/etc/audio.<ro.product.device>.xml for reading, storing
+ * the handle in state->file.
+ * Returns 0 on success, -ENOSYS if the file cannot be opened. */
+static int open_config_file(struct parse_state *state)
+{
+    char name[80];
+    char property[PROPERTY_VALUE_MAX];
+
+    property_get("ro.product.device", property, "generic");
+    snprintf(name, sizeof(name), "/system/etc/audio.%s.xml", property);
+
+    ALOGV("Reading configuration from %s\n", name);
+    state->file = fopen(name, "r");
+    if (!state->file) {
+        ALOGE("Failed to open config file %s", name);
+        return -ENOSYS;
+    }
+    return 0;
+}
+
+/* Release all parser-lifetime resources: interned path names, the
+ * temporary <init> path, the expat parser, the config file handle and
+ * the state struct itself. Safe to call with a NULL state.
+ */
+static void cleanup_parser(struct parse_state *state)
+{
+ if (state) {
+ path_names_free(state);
+
+ dyn_array_free(&state->init_path.ctl_array);
+
+ if (state->parser) {
+ XML_ParserFree(state->parser);
+ }
+
+ if (state->file) {
+ fclose(state->file);
+ }
+
+ free(state);
+ }
+}
+
+/* Parse the platform's audio config XML into 'cm': sets up the parse
+ * state, interns the predefined "off"/"on" path names, runs the expat
+ * parse, and on success applies the <init> mixer settings.
+ * Returns 0 on success or negative errno.
+ */
+static int parse_config_file(struct config_mgr *cm)
+{
+ struct parse_state *state;
+ int ret = 0;
+
+ state = calloc(1, sizeof(struct parse_state));
+ if (!state) {
+ return -ENOMEM;
+ }
+ state->cm = cm;
+ state->path_name_array.elem_size = sizeof(const char *);
+ state->init_path.ctl_array.elem_size = sizeof(struct ctl);
+
+ /* "off" and "on" are pre-defined path names */
+ ret = add_path_name(state, predefined_path_name_table[0]);
+ if (ret < 0) {
+ goto fail;
+ }
+ ret = add_path_name(state, predefined_path_name_table[1]);
+ if (ret < 0) {
+ goto fail;
+ }
+
+ ret = open_config_file(state);
+ if (ret == 0) {
+ /* -ENOMEM unless parser creation succeeds and do_parse runs */
+ ret = -ENOMEM;
+ state->parser = XML_ParserCreate(NULL);
+ if (state->parser) {
+ XML_SetUserData(state->parser, state);
+ XML_SetElementHandler(state->parser, parse_section_start, parse_section_end);
+ ret = do_parse(state);
+ }
+ }
+
+ if (ret >= 0) {
+ /* Initialize the mixer by applying the <init> path */
+ /* No need to take mutex during initialization */
+ apply_path_l(&state->init_path);
+ }
+
+fail:
+ cleanup_parser(state);
+ return ret;
+}
+
+/*********************************************************************
+ * Initialization
+ *********************************************************************/
+
+/* Allocate a config manager and populate it from the platform's audio
+ * configuration XML file.
+ * Returns the new config_mgr, or NULL on allocation or parse failure.
+ */
+struct config_mgr *init_audio_config()
+{
+    struct config_mgr* mgr = new_config_mgr();
+
+    /* BUGFIX: new_config_mgr() can fail; don't hand NULL to the parser */
+    if (!mgr) {
+        return NULL;
+    }
+
+    if (0 != parse_config_file(mgr)) {
+        /* BUGFIX: release everything a partial parse allocated (device
+         * and stream arrays, mixer handle), not just the top struct */
+        free_audio_config(mgr);
+        return NULL;
+    }
+
+    /* Free unused memory in the device and stream arrays */
+    compress_config_mgr(mgr);
+
+    return mgr;
+}
+
+/* Free the strdup'd usecase and case names owned by 'stream'.
+ * The arrays themselves are released by the caller. */
+static void free_usecases( struct stream *stream )
+{
+ struct usecase *puc = stream->usecase_array.usecases;
+ int uc_count = stream->usecase_array.count;
+ struct scase *pcase;
+ int i;
+
+ for (; uc_count > 0; uc_count--, puc++) {
+ free((void *)puc->name);
+ pcase = puc->case_array.cases;
+ for (i = puc->case_array.count; i > 0; i--, pcase++) {
+ free((void *)pcase->name);
+ }
+ }
+}
+
+/* Tear down a config manager created by init_audio_config(): frees all
+ * per-ctl value storage, ctl/path/device arrays, per-stream usecase
+ * data, closes the mixer and destroys the lock. NULL is tolerated.
+ */
+void free_audio_config( struct config_mgr *cm )
+{
+ struct dyn_array *path_array, *ctl_array, *stream_array;
+ int dev_idx, path_idx, ctl_idx, stream_idx;
+
+ if (cm) {
+ /* Free all devices */
+ for (dev_idx = cm->device_array.count - 1; dev_idx >= 0; --dev_idx) {
+ /* Free all paths in device */
+ path_array = &cm->device_array.devices[dev_idx].path_array;
+ for (path_idx = path_array->count - 1; path_idx >= 0; --path_idx) {
+ /* Free all ctls in path */
+ ctl_array = &path_array->paths[path_idx].ctl_array;
+ for (ctl_idx = ctl_array->count - 1; ctl_idx >= 0; --ctl_idx) {
+ if (ctl_array->ctls[ctl_idx].value.name) {
+ free((void*)ctl_array->ctls[ctl_idx].value.name);
+ ctl_array->ctls[ctl_idx].value.name = NULL;
+ }
+ if (ctl_array->ctls[ctl_idx].value.data) {
+ free((void*)ctl_array->ctls[ctl_idx].value.data);
+ ctl_array->ctls[ctl_idx].value.data = NULL;
+ }
+ }
+ dyn_array_free(ctl_array);
+ }
+
+ dyn_array_free(path_array);
+ }
+ dyn_array_free(&cm->device_array);
+
+ stream_array = &cm->stream_array;
+ for(stream_idx = stream_array->count - 1; stream_idx >= 0; --stream_idx) {
+ free_usecases(&stream_array->streams[stream_idx]);
+ }
+ dyn_array_free(&cm->stream_array);
+
+ if (cm->mixer) {
+ mixer_close(cm->mixer);
+ }
+ pthread_mutex_destroy(&cm->lock);
+ free(cm);
+ }
+}
+
diff --git a/audio_config.h b/audio_config.h
new file mode 100644
index 0000000..093a273
--- /dev/null
+++ b/audio_config.h
@@ -0,0 +1,145 @@
+/*
+ * Copyright (C) 2012-2013 Wolfson Microelectronics plc
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef AUDIO_CONFIG_H
+#define AUDIO_CONFIG_H
+
+#include <stddef.h>
+#include <system/audio.h>
+
+struct mixer_ctl;
+struct config_mgr;
+struct audio_config;
+
+/** Stream type */
+enum stream_type {
+ e_stream_out_pcm,
+ e_stream_in_pcm,
+ e_stream_out_compress,
+ e_stream_in_compress,
+ e_stream_hardware /* unspecified type routed in hardware */
+};
+
+/** Information about a stream */
+struct hw_stream {
+    enum stream_type type : 8;   /* packed to one byte */
+    uint8_t card_number;         /* ALSA card passed to pcm_open() */
+    uint8_t device_number;       /* ALSA device passed to pcm_open() */
+    unsigned int rate;           /* 0 = use HAL default rate */
+    unsigned int period_size;    /* 0 = use HAL default period size */
+    unsigned int period_count;   /* 0 = use HAL default period count */
+};
+
+/** Test whether a stream is an input */
+static inline bool stream_is_input( const struct hw_stream *stream )
+{
+ return (stream->type == e_stream_in_pcm)
+ || (stream->type == e_stream_in_compress);
+}
+
+/** Test whether a stream is PCM */
+static inline bool stream_is_pcm( const struct hw_stream *stream )
+{
+ return (stream->type == e_stream_out_pcm)
+ || (stream->type == e_stream_in_pcm);
+}
+
+/** Test whether a stream is compressed */
+static inline bool stream_is_compressed( const struct hw_stream *stream )
+{
+ return (stream->type == e_stream_out_compress)
+ || (stream->type == e_stream_in_compress);
+}
+
+/** Test whether stream is PCM output */
+static inline bool stream_is_pcm_out( const struct hw_stream *stream )
+{
+    return (stream->type == e_stream_out_pcm);  /* indented to match sibling predicates above */
+}
+
+/** Test whether stream is PCM input */
+static inline bool stream_is_pcm_in( const struct hw_stream *stream )
+{
+    return (stream->type == e_stream_in_pcm);  /* indented to match sibling predicates above */
+}
+
+/** Test whether stream is compressed output */
+static inline bool stream_is_compressed_out( const struct hw_stream *stream )
+{
+    return (stream->type == e_stream_out_compress);  /* indented to match sibling predicates above */
+}
+
+/** Test whether stream is compressed input */
+static inline bool stream_is_compressed_in( const struct hw_stream *stream )
+{
+    return (stream->type == e_stream_in_compress);  /* indented to match sibling predicates above */
+}
+
+/** Test whether stream is a hardware link */
+static inline bool stream_is_hardware( const struct hw_stream *stream )
+{
+    return (stream->type == e_stream_hardware);  /* indented to match sibling predicates above */
+}
+
+/** Initialize audio config layer */
+struct config_mgr *init_audio_config();
+
+/** Delete audio config layer */
+void free_audio_config( struct config_mgr *cm );
+
+/** Get list of all supported devices */
+uint32_t get_supported_devices( struct config_mgr *cm );
+
+/** Find a suitable stream and return pointer to it */
+const struct hw_stream *get_stream( struct config_mgr *cm,
+ const audio_devices_t devices,
+ const audio_output_flags_t flags,
+ const struct audio_config *config );
+
+/** Find a named custom stream and return a pointer to it */
+const struct hw_stream *get_named_stream(struct config_mgr *cm,
+ const char *name);
+
+/** Test whether a named custom stream is defined */
+bool is_named_stream_defined(struct config_mgr *cm, const char *name);
+
+/** Release stream */
+void release_stream( const struct hw_stream *stream );
+
+/** Get currently connected routes */
+uint32_t get_current_routes( const struct hw_stream *stream );
+
+/** Apply new device routing to a stream */
+void apply_route( const struct hw_stream *stream, uint32_t devices );
+
+/** Get bitmask of devices currently connected to this stream */
+uint32_t get_routed_devices( const struct hw_stream *stream );
+
+/** Adjust routing for all streams that alter with orientation */
+void rotate_routes( struct config_mgr *cm, int orientation );
+
+/** Apply hardware volume */
+int set_hw_volume( const struct hw_stream *stream, int left_pc, int right_pc);
+
+/** Apply a custom use-case
+ *
+ * @return 0 on success
+ * @return -ENOSYS if the usecase not declared
+ */
+int apply_use_case( const struct hw_stream* stream,
+ const char *setting,
+ const char *case_name);
+#endif /* ifndef AUDIO_CONFIG_H */
diff --git a/audio_hw.c b/audio_hw.c
index be5cdff..dbf5e10 100644
--- a/audio_hw.c
+++ b/audio_hw.c
@@ -1,8 +1,9 @@
/*
- * Copyright (C) 2012 Wolfson Microelectronics plc
- * Copyright (C) 2011 The Android Open Source Project
+ * Copyright (C) 2012-13 Wolfson Microelectronics plc
*
- * Liberal inspiration drawn from the AOSP code for Toro.
+ * This code is heavily based on AOSP HAL for the asus/grouper
+ *
+ * Copyright (C) 2012 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -17,8 +18,8 @@
* limitations under the License.
*/
-#define LOG_TAG "tiny_hw"
-#define LOG_NDEBUG 0
+#define LOG_TAG "tinyhal"
+/*#define LOG_NDEBUG 0*/
#include <errno.h>
#include <pthread.h>
@@ -30,204 +31,280 @@
#include <cutils/properties.h>
#include <cutils/str_parms.h>
-#include <hardware/hardware.h>
-#include <system/audio.h>
-#include <hardware/audio.h>
+#include <utils/Timers.h>
-#include <expat.h>
+#include <hardware/audio.h>
+#include <hardware/hardware.h>
+
+#include <system/audio.h>
#include <tinyalsa/asoundlib.h>
+#include <sound/compress_params.h>
+#include <sound/compress_offload.h>
+#include <tinycompress/tinycompress.h>
+
#include <audio_utils/resampler.h>
-#include <hardware/audio_effect.h>
-struct route_setting
-{
- char *ctl_name;
- int intval;
- char *strval;
+#include "audio_config.h"
+#include "voice_trigger.h"
+
+#include <math.h>
+
+
+#define OUT_PERIOD_SIZE_DEFAULT 1024
+#define OUT_PERIOD_COUNT_DEFAULT 4
+#define OUT_SAMPLING_RATE_DEFAULT 44100
+#define OUT_CHANNEL_MASK_DEFAULT AUDIO_CHANNEL_OUT_STEREO
+
+#define IN_PERIOD_COUNT_DEFAULT 4
+#define IN_PERIOD_SIZE_DEFAULT 1024
+#define IN_SAMPLING_RATE_DEFAULT 44100
+#define IN_CHANNEL_MASK_DEFAULT AUDIO_CHANNEL_IN_MONO
+
+/* AudioFlinger does not re-read the buffer size after
+ * issuing a routing or input_source change so the
+ * default buffer size must be suitable for both PCM
+ * and compressed inputs
+ */
+#define IN_COMPRESS_BUFFER_SIZE_DEFAULT 8192
+
+/* Maximum time we'll wait for data from a compress_pcm input */
+#define MAX_COMPRESS_PCM_TIMEOUT_MS 2100
+
+struct voice_control_trigger {
+ pthread_t thread;
+ pthread_mutex_t lock;
+ pthread_cond_t waitcv;
+ struct compress *compress;
+ void (*callback)(void *param);
+ void *callback_param;
+ bool own_compress;
+ volatile bool wait;
+ volatile bool terminate;
+ volatile bool triggered;
};
-/* The enable flag when 0 makes the assumption that enums are disabled by
- * "Off" and integers/booleans by 0 */
-static int set_route_by_array(struct mixer *mixer, struct route_setting *route,
- unsigned int len)
-{
- struct mixer_ctl *ctl;
- unsigned int i, j, ret;
+struct audio_device {
+ struct audio_hw_device hw_device;
- /* Go through the route array and set each value */
- for (i = 0; i < len; i++) {
- ctl = mixer_get_ctl_by_name(mixer, route[i].ctl_name);
- if (!ctl) {
- LOGE("Unknown control '%s'\n", route[i].ctl_name);
- return -EINVAL;
- }
-
- if (route[i].strval) {
- ret = mixer_ctl_set_enum_by_string(ctl, route[i].strval);
- if (ret != 0) {
- LOGE("Failed to set '%s' to '%s'\n",
- route[i].ctl_name, route[i].strval);
- } else {
- LOGV("Set '%s' to '%s'\n",
- route[i].ctl_name, route[i].strval);
- }
-
- } else {
- /* This ensures multiple (i.e. stereo) values are set jointly */
- for (j = 0; j < mixer_ctl_get_num_values(ctl); j++) {
- ret = mixer_ctl_set_value(ctl, j, route[i].intval);
- if (ret != 0) {
- LOGE("Failed to set '%s'.%d to %d\n",
- route[i].ctl_name, j, route[i].intval);
- } else {
- LOGV("Set '%s'.%d to %d\n",
- route[i].ctl_name, j, route[i].intval);
- }
- }
- }
- }
-
- return 0;
-}
-
-struct tiny_dev_cfg {
- int mask;
-
- struct route_setting *on;
- unsigned int on_len;
-
- struct route_setting *off;
- unsigned int off_len;
-};
-
-struct tiny_audio_device {
- struct audio_hw_device device;
- struct mixer *mixer;
-
- int mode;
-
- pthread_mutex_t route_lock;
- struct tiny_dev_cfg *dev_cfgs;
- int num_dev_cfgs;
- int active_devices;
- int devices;
-
+ pthread_mutex_t lock; /* see note below on mutex acquisition order */
+ bool standby;
bool mic_mute;
+ struct config_mgr *cm;
+ int orientation;
+ bool screen_off;
+
+ struct stream_out_pcm *active_out;
+ struct stream_in_pcm *active_in;
+ struct stream_in_pcm *active_voice_control;
};
-struct tiny_stream_out {
+
+typedef void(*close_fn)(struct audio_stream *);
+
+/* Fields common to all types of output stream */
+struct stream_out_common {
struct audio_stream_out stream;
- struct tiny_audio_device *adev;
+ close_fn close;
+ struct audio_device *dev;
+ const struct hw_stream* hw;
- struct pcm_config config;
- struct pcm *pcm;
+ pthread_mutex_t lock; /* see note below on mutex acquisition order */
+
+ bool standby;
+
+ /* Stream parameters as seen by AudioFlinger
+ * If stream is resampling AudioFlinger buffers before
+ * passing them to hardware, these members refer to the
+ * _input_ data from AudioFlinger
+ */
+ audio_format_t format;
+ uint32_t channel_mask;
+ int channel_count;
+ uint32_t sample_rate;
+ size_t frame_size;
+ uint32_t buffer_size;
+
+ struct {
+ uint32_t screen_off;
+ uint32_t screen_on;
+ } latency;
};
-#define MAX_PREPROCESSORS 10
+struct stream_out_pcm {
+ struct stream_out_common common;
-struct tiny_stream_in {
+ struct pcm *pcm;
+
+ uint32_t hw_sample_rate; /* actual sample rate of hardware */
+ int hw_channel_count; /* actual number of output channels */
+};
+
+/* Fields common to all types of input stream */
+struct stream_in_common {
struct audio_stream_in stream;
- pthread_mutex_t lock;
+ close_fn close;
+ struct audio_device *dev;
+ const struct hw_stream* hw;
- struct tiny_audio_device *adev;
+ pthread_mutex_t lock; /* see note below on mutex acquisition order */
- struct pcm_config config;
- struct pcm *pcm;
+ bool standby;
+
+ /* Stream parameters as seen by AudioFlinger
+ * If stream is resampling AudioFlinger buffers before
+ * passing them to hardware, these members refer to the
+ * _input_ data from AudioFlinger
+ */
+ audio_format_t format;
+ uint32_t channel_mask;
+ int channel_count;
+ uint32_t sample_rate;
+ size_t frame_size;
+ size_t buffer_size;
+
+ int input_source;
+};
+
+struct stream_in_pcm {
+ struct stream_in_common common;
+
+ union {
+ struct pcm *pcm;
+ struct compress *compress;
+ };
+
+ uint32_t hw_sample_rate; /* actual sample rate of hardware */
+ int hw_channel_count; /* actual number of input channels */
+ uint32_t period_size; /* ... of PCM input */
struct resampler_itfe *resampler;
struct resampler_buffer_provider buf_provider;
int16_t *buffer;
+ size_t in_buffer_size;
+ int in_buffer_frames;
size_t frames_in;
- unsigned int requested_rate;
- int standby;
- int source;
- effect_handle_t preprocessors[MAX_PREPROCESSORS];
- int num_preprocessors;
- int16_t *proc_buf;
- size_t proc_buf_size;
- size_t proc_frames_in;
int read_status;
+
+ struct voice_control_trigger *vc_trigger;
};
-/* Must be called with route_lock */
-void select_devices(struct tiny_audio_device *adev)
+enum {
+ ORIENTATION_LANDSCAPE,
+ ORIENTATION_PORTRAIT,
+ ORIENTATION_SQUARE,
+ ORIENTATION_UNDEFINED,
+};
+
+static uint32_t out_get_sample_rate(const struct audio_stream *stream);
+static uint32_t in_get_sample_rate(const struct audio_stream *stream);
+static int get_next_buffer(struct resampler_buffer_provider *buffer_provider,
+ struct resampler_buffer* buffer);
+static void release_buffer(struct resampler_buffer_provider *buffer_provider,
+ struct resampler_buffer* buffer);
+/*
+ * NOTE: when multiple mutexes have to be acquired, always take the
+ * audio_device mutex first, followed by the stream_in and/or
+ * stream_out mutexes.
+ */
+
+/*********************************************************************
+ * Stream common functions
+ *********************************************************************/
+
+static int common_set_parameters_locked(const struct hw_stream *stream, const char *kvpairs)
{
- int i;
+ char *parms = strdup(kvpairs);
+ char *p, *temp;
+ char *pval;
+ char value[32];
+ int ret;
- if (adev->active_devices == adev->devices)
- return;
+ ALOGV("+common_set_parameters(%p) '%s'", stream, kvpairs);
- LOGV("Changing devices %x => %x\n", adev->active_devices, adev->devices);
+ if (!parms) {
+ return -ENOMEM;
+ }
- /* Turn on new devices first so we don't glitch due to powerdown... */
- for (i = 0; i < adev->num_dev_cfgs; i++)
- if ((adev->devices & adev->dev_cfgs[i].mask) &&
- !(adev->active_devices & adev->dev_cfgs[i].mask))
- set_route_by_array(adev->mixer, adev->dev_cfgs[i].on,
- adev->dev_cfgs[i].on_len);
+ /* It's not obvious what we should do if multiple parameters
+ * are given and we only understand some. The action taken
+ * here is to process all that we understand and only return
+ * and error if we don't understand any
+ */
+ ret = -ENOTSUP;
+ p = strtok_r(parms, ";", &temp);
+ while(p) {
+ pval = strchr(p, '=');
+ if (pval && (pval[1] != '\0')) {
+ *pval = '\0';
+ if (apply_use_case(stream, p, pval+1) >= 0) {
+ ret = 0;
+ }
+ *pval = '=';
+ }
+ p = strtok_r(NULL, ";", &temp);
+ }
- /* ...then disable old ones. */
- for (i = 0; i < adev->num_dev_cfgs; i++)
- if (!(adev->devices & adev->dev_cfgs[i].mask) &&
- (adev->active_devices & adev->dev_cfgs[i].mask))
- set_route_by_array(adev->mixer, adev->dev_cfgs[i].off,
- adev->dev_cfgs[i].off_len);
-
- adev->active_devices = adev->devices;
+ return ret;
}
+static int common_get_routing_param(uint32_t *vout, const char *kvpairs)
+{
+ struct str_parms *parms;
+ char value[32];
+ int ret;
+
+ parms = str_parms_create_str(kvpairs);
+
+ ret = str_parms_get_str(parms, AUDIO_PARAMETER_STREAM_ROUTING,
+ value, sizeof(value));
+ if (ret >= 0) {
+ *vout = atoi(value);
+ }
+ str_parms_destroy(parms);
+ return ret;
+}
+
+/*********************************************************************
+ * Output stream common functions
+ *********************************************************************/
+
static uint32_t out_get_sample_rate(const struct audio_stream *stream)
{
- return 44100;
+ struct stream_out_common *out = (struct stream_out_common *)stream;
+ return out->sample_rate;
}
static int out_set_sample_rate(struct audio_stream *stream, uint32_t rate)
{
- if (rate == out_get_sample_rate(stream))
- return 0;
- else
- return -EINVAL;
+ return -ENOSYS;
}
static size_t out_get_buffer_size(const struct audio_stream *stream)
{
- return 4096;
+ struct stream_out_common *out = (struct stream_out_common *)stream;
+ ALOGV("out_get_buffer_size(%p): %u", stream, out->buffer_size );
+ return out->buffer_size;
}
static uint32_t out_get_channels(const struct audio_stream *stream)
{
- return AUDIO_CHANNEL_OUT_STEREO;
+ struct stream_out_common *out = (struct stream_out_common *)stream;
+ return out->channel_mask;
}
-static int out_get_format(const struct audio_stream *stream)
+static audio_format_t out_get_format(const struct audio_stream *stream)
{
- return AUDIO_FORMAT_PCM_16_BIT;
+ struct stream_out_common *out = (struct stream_out_common *)stream;
+ /*ALOGV("out_get_format(%p): 0x%x", stream, out->format );*/
+ return out->format;
}
-static int out_set_format(struct audio_stream *stream, int format)
+static int out_set_format(struct audio_stream *stream, audio_format_t format)
{
- return 0;
-}
-
-static int out_standby(struct audio_stream *stream)
-{
- struct tiny_stream_out *out = (struct tiny_stream_out *)stream;
- int ret;
-
- if (out->pcm) {
- LOGV("out_standby(%p) closing PCM\n", stream);
- ret = pcm_close(out->pcm);
- if (ret != 0) {
- LOGE("out_standby(%p) failed: %d\n", stream, ret);
- return ret;
- }
- out->pcm = NULL;
- }
-
- return 0;
+ return -ENOSYS;
}
static int out_dump(const struct audio_stream *stream, int fd)
@@ -237,36 +314,29 @@
static int out_set_parameters(struct audio_stream *stream, const char *kvpairs)
{
- struct tiny_stream_out *out = (struct tiny_stream_out *)stream;
- struct tiny_audio_device *adev = out->adev;
- struct str_parms *parms;
- char *str;
- char value[32];
- int ret, val = 0;
- bool force_input_standby = false;
+ ALOGV("+out_set_parameters(%p) '%s'", stream, kvpairs);
- parms = str_parms_create_str(kvpairs);
+ struct stream_out_common *out = (struct stream_out_common *)stream;
+ struct audio_device *adev = out->dev;
+ uint32_t v;
+ int ret;
- ret = str_parms_get_str(parms, AUDIO_PARAMETER_STREAM_ROUTING,
- value, sizeof(value));
+ ret = common_get_routing_param(&v, kvpairs);
+
+ pthread_mutex_lock(&adev->lock);
+
if (ret >= 0) {
- val = atoi(value);
-
- if (val != 0) {
- pthread_mutex_lock(&adev->route_lock);
-
- adev->devices &= ~AUDIO_DEVICE_OUT_ALL;
- adev->devices |= val;
- select_devices(adev);
-
- pthread_mutex_unlock(&adev->route_lock);
- } else {
- LOGW("output routing with no devices\n");
- }
+ apply_route(out->hw, v);
}
- str_parms_destroy(parms);
+ if (common_set_parameters_locked(out->hw, kvpairs) >= 0) {
+ ret = 0;
+ }
+
+ pthread_mutex_unlock(&adev->lock);
+
+ ALOGV("-out_set_parameters(%p):%d", out, ret);
return ret;
}
@@ -277,47 +347,50 @@
static uint32_t out_get_latency(const struct audio_stream_out *stream)
{
- return 0;
-}
+ struct stream_out_common *out = (struct stream_out_common *)stream;
+ struct audio_device *adev = out->dev;
+ uint32_t latency;
-static int out_set_volume(struct audio_stream_out *stream, float left,
- float right)
-{
- /* Use the soft volume control for now; AudioFlinger rarely
- * actually calls down. */
- return -EINVAL;
-}
+ pthread_mutex_lock(&adev->lock);
-static ssize_t out_write(struct audio_stream_out *stream, const void* buffer,
- size_t bytes)
-{
- struct tiny_stream_out *out = (struct tiny_stream_out *)stream;
- int ret;
-
- if (!out->pcm) {
- LOGV("out_write(%p) opening PCM\n", stream);
- out->pcm = pcm_open(0, 0, PCM_OUT | PCM_MMAP, &out->config);
-
- if (!pcm_is_ready(out->pcm)) {
- LOGE("Failed to open output PCM: %s", pcm_get_error(out->pcm));
- pcm_close(out->pcm);
- return -EBUSY;
- }
+ if (adev->screen_off && !adev->active_in) {
+ latency = out->latency.screen_off;
+ } else {
+ latency = out->latency.screen_on;
}
- ret = pcm_mmap_write(out->pcm, buffer, bytes);
- if (ret != 0) {
- LOGE("out_write(%p) failed: %d\n", stream, ret);
- return ret;
- }
+ pthread_mutex_unlock(&adev->lock);
- return bytes;
+ return latency;
}
-static int out_get_render_position(const struct audio_stream_out *stream,
- uint32_t *dsp_frames)
+static int volume_to_percent(float volume)
{
- return -EINVAL;
+ float decibels;
+ float percent;
+
+ /* Converting back to a decibel scale */
+ if(volume > 0) {
+ decibels = log(volume) / 0.115129f;
+ } else {
+ /* Use the maximum attenuation value 58 */
+ decibels = -58;
+ }
+
+ /* decibels range is -58..0, rescale to range 0..100 */
+ percent = ((decibels + 58.0) * (100.0/58.0));
+ return (int)percent;
+}
+
+static int out_set_volume(struct audio_stream_out *stream, float left, float right)
+{
+ struct stream_out_common *out = (struct stream_out_common *)stream;
+ int l_pc = volume_to_percent(left);
+ int r_pc = volume_to_percent(right);
+
+ ALOGV("out_set_volume (%f,%f) -> (%d%%,%d%%)", left, right, l_pc, r_pc);
+
+ return set_hw_volume(out->hw, l_pc, r_pc);
}
static int out_add_audio_effect(const struct audio_stream *stream, effect_handle_t effect)
@@ -330,40 +403,298 @@
return 0;
}
-/** audio_stream_in implementation **/
+static int out_get_next_write_timestamp(const struct audio_stream_out *stream,
+ int64_t *timestamp)
+{
+ return -EINVAL;
+}
+
+static void do_close_out_common(struct audio_stream *stream)
+{
+ struct stream_out_common *out = (struct stream_out_common *)stream;
+ release_stream(out->hw);
+ free(stream);
+}
+
+static int do_init_out_common( struct stream_out_common *out,
+ const struct audio_config *config,
+ audio_devices_t devices )
+{
+ int ret;
+
+ out->standby = true;
+
+ out->stream.common.get_sample_rate = out_get_sample_rate;
+ out->stream.common.set_sample_rate = out_set_sample_rate;
+ out->stream.common.get_buffer_size = out_get_buffer_size;
+ out->stream.common.get_channels = out_get_channels;
+ out->stream.common.get_format = out_get_format;
+ out->stream.common.set_format = out_set_format;
+ out->stream.common.dump = out_dump;
+ out->stream.common.set_parameters = out_set_parameters;
+ out->stream.common.get_parameters = out_get_parameters;
+ out->stream.common.add_audio_effect = out_add_audio_effect;
+ out->stream.common.remove_audio_effect = out_remove_audio_effect;
+ out->stream.get_latency = out_get_latency;
+ out->stream.set_volume = out_set_volume;
+ out->stream.get_next_write_timestamp = out_get_next_write_timestamp;
+
+ /* init requested stream config */
+ out->format = config->format;
+ out->sample_rate = (config->sample_rate == 0)
+ ? OUT_SAMPLING_RATE_DEFAULT
+ : config->sample_rate;
+ out->channel_mask = (config->channel_mask == 0)
+ ? OUT_CHANNEL_MASK_DEFAULT
+ : config->channel_mask;
+ out->channel_count = popcount(out->channel_mask);
+
+ /* Default settings */
+ out->frame_size = audio_stream_frame_size(&out->stream.common);
+
+ /* Apply initial route */
+ apply_route(out->hw, devices);
+
+ return 0;
+}
+
+/*********************************************************************
+ * PCM output stream
+ *********************************************************************/
+
+static unsigned int out_pcm_cfg_period_count(struct stream_out_pcm *out)
+{
+ if (out->common.hw->period_count != 0) {
+ return out->common.hw->period_count;
+ } else {
+ return OUT_PERIOD_COUNT_DEFAULT;
+ }
+}
+
+static unsigned int out_pcm_cfg_period_size(struct stream_out_pcm *out)
+{
+ if (out->common.hw->period_size != 0) {
+ return out->common.hw->period_size;
+ } else {
+ return OUT_PERIOD_SIZE_DEFAULT;
+ }
+}
+
+static unsigned int out_pcm_cfg_rate(struct stream_out_pcm *out)
+{
+ if (out->common.hw->rate != 0) {
+ return out->common.hw->rate;
+ } else {
+ return OUT_SAMPLING_RATE_DEFAULT;
+ }
+}
+
+/* must be called with hw device and output stream mutexes locked */
+static void do_out_pcm_standby(struct stream_out_pcm *out)
+{
+ struct audio_device *adev = out->common.dev;
+
+ ALOGV("+do_out_standby(%p)", out);
+
+ if (!out->common.standby) {
+ pcm_close(out->pcm);
+ out->pcm = NULL;
+ adev->active_out = NULL;
+ out->common.standby = true;
+ }
+
+ ALOGV("-do_out_standby(%p)", out);
+}
+
+static void out_pcm_fill_params(struct stream_out_pcm *out,
+ const struct pcm_config *config )
+{
+ out->hw_sample_rate = config->rate;
+ out->hw_channel_count = config->channels;
+ out->common.buffer_size = pcm_frames_to_bytes(out->pcm,
+ pcm_get_buffer_size(out->pcm));
+
+ out->common.latency.screen_on = (config->period_size * config->period_count * 1000)
+ / config->rate;
+ out->common.latency.screen_off = out->common.latency.screen_on;
+}
+
+/* must be called with hw device and output stream mutexes locked */
+static int start_output_pcm(struct stream_out_pcm *out)
+{
+    struct audio_device *adev = out->common.dev;
+
+    /* out_pcm_cfg_*() substitute card defaults where values are zero */
+    struct pcm_config config = {
+        .channels = out->common.channel_count,
+        .rate = out_pcm_cfg_rate(out),
+        .period_size = out_pcm_cfg_period_size(out),
+        .period_count = out_pcm_cfg_period_count(out),
+        .format = PCM_FORMAT_S16_LE,
+        .start_threshold = 0,
+        .stop_threshold = 0,
+        .silence_threshold = 0
+    };
+
+    ALOGV("+start_output_stream(%p)", out);
+
+    out->pcm = pcm_open(out->common.hw->card_number,
+                        out->common.hw->device_number,
+                        PCM_OUT,
+                        &config);
+
+    if (!out->pcm || !pcm_is_ready(out->pcm)) {  /* pcm_open() may return NULL; must not fall through with it */
+        ALOGE("pcm_open(out) failed: %s", out->pcm ? pcm_get_error(out->pcm) : "returned NULL");
+        if (out->pcm) pcm_close(out->pcm);
+        return -ENOMEM;
+    }
+
+    out_pcm_fill_params( out, &config );
+
+    adev->active_out = out;
+
+    ALOGV("-start_output_stream(%p)", out);
+    return 0;
+}
+
+static int out_pcm_standby(struct audio_stream *stream)
+{
+ struct stream_out_pcm *out = (struct stream_out_pcm *)stream;
+
+ pthread_mutex_lock(&out->common.dev->lock);
+ pthread_mutex_lock(&out->common.lock);
+ do_out_pcm_standby(out);
+ pthread_mutex_unlock(&out->common.lock);
+ pthread_mutex_unlock(&out->common.dev->lock);
+
+ return 0;
+}
+
+static ssize_t out_pcm_write(struct audio_stream_out *stream, const void* buffer,
+ size_t bytes)
+{
+ ALOGV("+out_pcm_write(%p) l=%u", stream, bytes);
+
+ int ret = 0;
+ struct stream_out_pcm *out = (struct stream_out_pcm *)stream;
+ struct audio_device *adev = out->common.dev;
+
+ /* Check that we are routed to something. Android can send routing
+ * commands that tell us to disconnect from everything and in that
+ * state we shouldn't issue any write commands because we can't be
+ * sure that the driver will accept a write to nowhere
+ */
+ if (get_current_routes(out->common.hw) == 0) {
+ ALOGV("-out_pcm_write(%p) 0 (no routes)", stream);
+ return 0;
+ }
+
+ /*
+ * acquiring hw device mutex systematically is useful if a low
+ * priority thread is waiting on the output stream mutex - e.g.
+ * executing out_set_parameters() while holding the hw device
+ * mutex
+ */
+ pthread_mutex_lock(&adev->lock);
+ pthread_mutex_lock(&out->common.lock);
+ if (out->common.standby) {
+ ret = start_output_pcm(out);
+ if (ret != 0) {
+ pthread_mutex_unlock(&adev->lock);
+ goto exit;
+ }
+ out->common.standby = false;
+ }
+ pthread_mutex_unlock(&adev->lock);
+
+ ret = pcm_write(out->pcm, buffer, bytes);
+ if (ret >= 0) {
+ ret = bytes;
+ }
+
+exit:
+ pthread_mutex_unlock(&out->common.lock);
+
+ ALOGV("-out_pcm_write(%p) r=%u", stream, ret);
+
+ return ret;
+}
+
+static int out_pcm_get_render_position(const struct audio_stream_out *stream,
+ uint32_t *dsp_frames)
+{
+ return -EINVAL;
+}
+static void do_close_out_pcm(struct audio_stream *stream)
+{
+ out_pcm_standby(stream);
+ do_close_out_common(stream);
+}
+
+static int do_init_out_pcm( struct stream_out_pcm *out,
+ const struct audio_config *config )
+{
+ if (config->sample_rate != out_pcm_cfg_rate(out)) {
+ ALOGE("AF requested rate %u not supported", config->sample_rate);
+ return -ENOTSUP;
+ }
+
+ out->common.close = do_close_out_pcm;
+ out->common.stream.common.standby = out_pcm_standby;
+ out->common.stream.write = out_pcm_write;
+ out->common.stream.get_render_position = out_pcm_get_render_position;
+
+ out->common.buffer_size = out_pcm_cfg_period_size(out)
+ * out_pcm_cfg_period_count(out)
+ * out->common.frame_size;
+ return 0;
+}
+
+/*********************************************************************
+ * Input stream common functions
+ *********************************************************************/
static uint32_t in_get_sample_rate(const struct audio_stream *stream)
{
- return 8000;
+ const struct stream_in_common *in = (struct stream_in_common *)stream;
+
+ return in->sample_rate;
}
static int in_set_sample_rate(struct audio_stream *stream, uint32_t rate)
{
- return 0;
+ const struct stream_in_common *in = (struct stream_in_common *)stream;
+
+ if (rate == in->sample_rate) {
+ return 0;
+ } else {
+ return -ENOTSUP;
+ }
+}
+
+static audio_channel_mask_t in_get_channels(const struct audio_stream *stream)
+{
+ const struct stream_in_common *in = (struct stream_in_common *)stream;
+
+ return in->channel_mask;
+}
+
+static audio_format_t in_get_format(const struct audio_stream *stream)
+{
+ const struct stream_in_common *in = (struct stream_in_common *)stream;
+
+ return in->format;
+}
+
+static int in_set_format(struct audio_stream *stream, audio_format_t format)
+{
+ return -ENOSYS;
}
static size_t in_get_buffer_size(const struct audio_stream *stream)
{
- return 320;
-}
-
-static uint32_t in_get_channels(const struct audio_stream *stream)
-{
- return AUDIO_CHANNEL_IN_MONO;
-}
-
-static int in_get_format(const struct audio_stream *stream)
-{
- return AUDIO_FORMAT_PCM_16_BIT;
-}
-
-static int in_set_format(struct audio_stream *stream, int format)
-{
- return 0;
-}
-
-static int in_standby(struct audio_stream *stream)
-{
- return 0;
+ const struct stream_in_common *in = (struct stream_in_common *)stream;
+ ALOGV("in_get_buffer_size(%p): %u", stream, in->buffer_size );
+ return in->buffer_size;
}
static int in_dump(const struct audio_stream *stream, int fd)
@@ -387,112 +718,1235 @@
return 0;
}
-static ssize_t in_read(struct audio_stream_in *stream, void* buffer,
- size_t bytes)
-{
- /* XXX: fake timing for audio input */
- usleep(bytes * 1000000 / audio_stream_frame_size(&stream->common) /
- in_get_sample_rate(&stream->common));
- return bytes;
-}
-
static uint32_t in_get_input_frames_lost(struct audio_stream_in *stream)
{
return 0;
}
-static int in_add_audio_effect(const struct audio_stream *stream, effect_handle_t effect)
+static int in_add_audio_effect(const struct audio_stream *stream,
+ effect_handle_t effect)
{
return 0;
}
-static int in_remove_audio_effect(const struct audio_stream *stream, effect_handle_t effect)
+static int in_remove_audio_effect(const struct audio_stream *stream,
+ effect_handle_t effect)
{
return 0;
}
-static int adev_open_output_stream(struct audio_hw_device *dev,
- uint32_t devices, int *format,
- uint32_t *channels, uint32_t *sample_rate,
- struct audio_stream_out **stream_out)
+static void do_close_in_common(struct audio_stream *stream)
{
- struct tiny_audio_device *adev = (struct tiny_audio_device *)dev;
- struct tiny_stream_out *out;
+ struct stream_in_common *in = (struct stream_in_common *)stream;
+ in->stream.common.standby(stream);
+
+ /* active_voice_control is not cleared by standby so we must
+ * clear it here when stream is closed
+ */
+ if ((struct stream_in_common *)in->dev->active_voice_control == in) {
+ in->dev->active_voice_control = NULL;
+ }
+ release_stream(in->hw);
+ free(stream);
+}
+
+static int do_init_in_common( struct stream_in_common *in,
+ const struct audio_config *config,
+ audio_devices_t devices )
+{
+ in->standby = true;
+
+ in->close = do_close_in_common;
+ in->stream.common.get_sample_rate = in_get_sample_rate;
+ in->stream.common.set_sample_rate = in_set_sample_rate;
+ in->stream.common.get_buffer_size = in_get_buffer_size;
+ in->stream.common.get_channels = in_get_channels;
+ in->stream.common.get_format = in_get_format;
+ in->stream.common.set_format = in_set_format;
+ in->stream.common.dump = in_dump;
+ in->stream.common.set_parameters = in_set_parameters;
+ in->stream.common.get_parameters = in_get_parameters;
+ in->stream.common.add_audio_effect = in_add_audio_effect;
+ in->stream.common.remove_audio_effect = in_remove_audio_effect;
+ in->stream.set_gain = in_set_gain;
+ in->stream.get_input_frames_lost = in_get_input_frames_lost;
+
+ /* init requested stream config */
+ in->format = config->format;
+ in->sample_rate = (config->sample_rate == 0)
+ ? IN_SAMPLING_RATE_DEFAULT
+ : config->sample_rate;
+ in->channel_mask = (config->channel_mask == 0)
+ ? IN_CHANNEL_MASK_DEFAULT
+ : config->channel_mask;
+ in->channel_count = popcount(in->channel_mask);
+
+ in->frame_size = audio_stream_frame_size(&in->stream.common);
+
+ /* Apply initial routing */
+ apply_route(in->hw, devices);
+
+ return 0;
+}
+
+/*********************************************************************
+ * Voice control triggering
+ * Currently assumes a compressed channel
+ *********************************************************************/
+
+static void* voice_control_trigger_thread(void *param)
+{
+ struct voice_control_trigger *self = param;
+ struct compress *compress;
int ret;
- out = calloc(1, sizeof(struct tiny_stream_out));
- if (!out)
- return -ENOMEM;
+ /* signal that we're alive and ready */
+ pthread_cond_signal(&self->waitcv);
- out->stream.common.get_sample_rate = out_get_sample_rate;
- out->stream.common.set_sample_rate = out_set_sample_rate;
- out->stream.common.get_buffer_size = out_get_buffer_size;
- out->stream.common.get_channels = out_get_channels;
- out->stream.common.get_format = out_get_format;
- out->stream.common.set_format = out_set_format;
- out->stream.common.standby = out_standby;
- out->stream.common.dump = out_dump;
- out->stream.common.set_parameters = out_set_parameters;
- out->stream.common.get_parameters = out_get_parameters;
- out->stream.common.add_audio_effect = out_add_audio_effect;
- out->stream.common.remove_audio_effect = out_remove_audio_effect;
- out->stream.get_latency = out_get_latency;
- out->stream.set_volume = out_set_volume;
- out->stream.write = out_write;
- out->stream.get_render_position = out_get_render_position;
+ while (!self->terminate) {
+ pthread_mutex_lock(&self->lock);
+ if (!self->wait) {
+ pthread_cond_wait(&self->waitcv, &self->lock);
+ }
+ self->wait = false;
+ pthread_mutex_unlock(&self->lock);
- out->adev = adev;
+ if (self->terminate) {
+ break;
+ }
- pthread_mutex_lock(&adev->route_lock);
- adev->devices &= ~AUDIO_DEVICE_OUT_ALL;
- adev->devices |= devices;
- select_devices(adev);
- pthread_mutex_unlock(&adev->route_lock);
+ /* We must protect against any failure by the main thread to open
+ * the compressed channel
+ */
+ compress = self->compress;
+ if (compress != NULL) {
+ ALOGV("VC wait");
+ ret = compress_wait(compress, -1);
- *channels = out_get_channels(&out->stream.common);
- *format = out_get_format(&out->stream.common);
- *sample_rate = out_get_sample_rate(&out->stream.common);
+ if (self->terminate) {
+ break;
+ }
- /* Should query the driver for parameters and compute defaults
- * from those; should also support configuration from file and
- * buffer resizing.
+ if (ret == 0) {
+ self->triggered = true;
+ ALOGV("VC trigger %d", ret);
+ (self->callback)(self->callback_param);
+ }
+ }
+ }
+
+ if (self->own_compress && self->compress) {
+ ALOGV("VC trigger thread closes compress");
+ compress_close(self->compress);
+ }
+ free(self);
+
+ ALOGV("VC trigger thread terminates");
+ return NULL;
+}
+
+/* Arm the voice trigger worker: clear any previous trigger result and
+ * wake the thread so it enters compress_wait() on the trigger channel.
+ */
+static void start_voice_control_wait(struct voice_control_trigger *self)
+{
+ ALOGV("start_voice_control_wait");
+
+ pthread_mutex_lock(&self->lock);
+ self->triggered = false;
+ self->wait = true;
+ pthread_cond_signal(&self->waitcv);
+ pthread_mutex_unlock(&self->lock);
+}
+
+/* NOTE(review): stub - cancelling a pending trigger wait is not
+ * implemented and the 'in' argument is unused. Cancellation is
+ * currently achieved via destroy_voice_control_trigger() instead.
+ */
+static void cancel_voice_control_wait(struct voice_control_trigger *in)
+{
+ ALOGV("cancel_voice_control_wait");
+}
+
+/* Allocate a voice trigger context and start its worker thread,
+ * blocking until the thread has signalled that it is running.
+ * Returns NULL on allocation or thread-creation failure. Ownership of
+ * the returned struct passes to the worker thread, which frees it
+ * when it terminates (see voice_control_trigger_thread()).
+ *
+ * NOTE(review): the worker signals waitcv once without holding
+ * t->lock; if that signal fires before we reach pthread_cond_wait()
+ * the wakeup is lost and this call can block - consider a 'ready'
+ * flag checked under the lock before waiting.
+ */
+static struct voice_control_trigger* create_voice_control_trigger(void)
+{
+ /* was erroneously 'static': the pointer must be an ordinary local,
+ * otherwise concurrent or subsequent calls share stale state */
+ struct voice_control_trigger *t;
+ int ret;
+
+ t = calloc(1, sizeof(struct voice_control_trigger));
+
+ if (t != NULL) {
+ pthread_mutex_init(&t->lock, NULL);
+ pthread_cond_init(&t->waitcv, NULL);
+ pthread_mutex_lock(&t->lock);
+
+ ret = pthread_create(&t->thread, NULL, voice_control_trigger_thread, t);
+ if (ret != 0) {
+ goto fail;
+ }
+
+ /* Wait for thread to initialize */
+ pthread_cond_wait(&t->waitcv, &t->lock);
+ pthread_mutex_unlock(&t->lock);
+ }
+
+ return t;
+
+fail:
+ /* was: free(t) with t->lock still locked - a mutex must be
+ * unlocked (and should be destroyed) before its memory is freed */
+ pthread_mutex_unlock(&t->lock);
+ pthread_cond_destroy(&t->waitcv);
+ pthread_mutex_destroy(&t->lock);
+ free(t);
+ return NULL;
+}
+
+/* Terminate the trigger worker thread and wait for it to exit.
+ * If close_channel is true the worker takes ownership of the
+ * compressed channel and closes it as it terminates; the channel is
+ * stopped here to break the worker out of compress_wait().
+ * The worker frees 'self', so it must not be touched after this
+ * function returns.
+ * NOTE(review): if close_channel is false 'terminate' is never set,
+ * so pthread_join() may block forever - all current callers pass
+ * true; confirm before adding a false caller.
+ */
+static void destroy_voice_control_trigger(struct voice_control_trigger *self,
+ bool close_channel)
+{
+ ALOGV("+destroy_voice_control_trigger (close=%d)", close_channel);
+
+ if (self != NULL) {
+ pthread_mutex_lock(&self->lock);
+ if (close_channel && (self->compress != NULL)) {
+ /* Take ownership of the compressed channel and close */
+ /* it as we terminate */
+ /* Must stop the channel to force thread out of the poll */
+ self->own_compress = true;
+ self->terminate = true;
+
+ /* Kick the driver to create an error condition */
+ /* so wait thread will exit the poll */
+
+ if (!is_compress_running(self->compress)) {
+ /* If it's not running, do a dummy start so we */
+ /* can force a stop */
+ compress_start(self->compress);
+ }
+ compress_stop(self->compress);
+ }
+
+ /* wake the worker if it is blocked waiting to be armed */
+ pthread_cond_signal(&self->waitcv);
+ pthread_mutex_unlock(&self->lock);
+
+ /* A struct voice_control_trigger cannot be created without a thread */
+ /* so it is safe to just call pthread_join() here */
+ pthread_join(self->thread, NULL);
+ }
+
+ ALOGV("-destroy_voice_control_trigger");
+}
+
+/* Invoked on the trigger worker thread when the voice trigger fires.
+ * The trigger is only forwarded while the stream is in standby; once
+ * the stream is active the data path is already delivering audio.
+ */
+static void voice_control_callback(void *param)
+{
+ struct stream_in_pcm *in = param;
+
+ if (in->common.standby) {
+ ALOGV("VC trig");
+ send_voice_trigger();
+ } else {
+ ALOGV("VC trig ignored (not in standby)");
+ }
+}
+
+/* Create a voice trigger context for a compressed input stream and
+ * attach it to 'in'. The trigger wait itself is started later via
+ * do_in_compress_pcm_start_vc_trigger() once routing is set up.
+ * Returns 0 on success, -EINVAL if the stream is not a compressed
+ * stream, -ENOMEM if the trigger context could not be created.
+ */
+static int init_voice_control(struct stream_in_pcm *in)
+{
+ /* removed unused local 'adev' (dead code) */
+ struct voice_control_trigger *t;
+ int ret;
+
+ ALOGV("+init_voice_control");
+ if (!stream_is_compressed(in->common.hw)) {
+ /* We don't support triggering on PCM streams */
+ ret = -EINVAL;
+ goto out;
+ }
+
+ t = create_voice_control_trigger();
+ if (t == NULL) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ /* in->compress may still be NULL here; it is re-assigned when the
+ * channel is opened and the wait is armed */
+ t->compress = in->compress;
+ t->callback = voice_control_callback;
+ t->callback_param = in;
+ in->vc_trigger = t;
+ ret = 0;
+
+out:
+ ALOGV("-init_voice_control %d", ret);
+ return ret;
+}
+
+/*********************************************************************
+ * PCM input stream via compressed channel
+ *********************************************************************/
+
+/* must be called with hw device and input stream mutexes locked */
+/* Open and start the compressed channel that delivers PCM capture
+ * data. Must be called with hw device and input stream mutexes
+ * locked. Returns 0 on success or a negative errno value on failure
+ * (callers test 'ret < 0', so the error MUST be negative).
+ * NOTE(review): COMPRESS_OUT direction is used for a capture channel
+ * - confirm against the tinycompress API that this is intended.
+ */
+static int do_open_compress_pcm_in(struct stream_in_pcm *in)
+{
+ struct snd_codec codec;
+ struct compress *compress;
+ int ret;
+
+ ALOGV("+do_open_compress_pcm_in");
+
+ memset(&codec, 0, sizeof(codec));
+ codec.id = SND_AUDIOCODEC_PCM;
+ codec.ch_in = in->common.channel_count;
+ codec.sample_rate = in->common.sample_rate;
+ codec.format = SNDRV_PCM_FORMAT_S16_LE;
+
+ /* Fragment and buffer sizes should be configurable or auto-detected
+ * but are currently just hardcoded
+ */
+ struct compr_config config = {
+ .fragment_size = 4096,
+ .fragments = 1,
+ .codec = &codec
+ };
+
+ compress = compress_open(in->common.hw->card_number,
+ in->common.hw->device_number,
+ COMPRESS_OUT,
+ &config);
+
+ if (!compress || !is_compress_ready(compress)) {
+ /* was 'ret = errno': a positive value is not recognised as an
+ * error by callers that test ret < 0 */
+ ret = (errno != 0) ? -errno : -EINVAL;
+ ALOGE_IF(compress,"compress_open(in) failed: %s", compress_get_error(compress));
+ ALOGE_IF(!compress,"compress_open(in) failed");
+ /* do not pass NULL to compress_close() */
+ if (compress) {
+ compress_close(compress);
+ }
+ goto exit;
+ }
+ in->compress = compress;
+ in->common.buffer_size = config.fragment_size * config.fragments * in->common.frame_size;
+ compress_start(in->compress);
+ ret = 0;
+
+exit:
+ ALOGV("-do_open_compress_pcm_in (%d)", ret);
+ return ret;
+}
+
+/* must be called with hw device and input stream mutexes locked */
+static int start_compress_pcm_input_stream(struct stream_in_pcm *in)
+{
+ struct audio_device *adev = in->common.dev;
+ int ret;
+
+ ALOGV("start_compress_pcm_input_stream");
+
+ if (in->common.standby) {
+ /* For voice control we don't need to cancel a pending trigger because
+ * if it's still waiting for a trigger, we won't return any data and
+ * will eventually be put back into standby to continue waiting for
+ * a trigger
+ */
+ if (in->vc_trigger == NULL) {
+ ret = do_open_compress_pcm_in(in);
+ if (ret < 0) {
+ return ret;
+ }
+ }
+
+ adev->active_in = in;
+ in->common.standby = 0;
+ }
+
+ return 0;
+}
+
+/* Open the compressed channel and arm the voice trigger worker on it.
+ * An open failure is silently ignored - the stream then simply never
+ * triggers. NOTE(review): consider logging the failure.
+ */
+static void do_in_compress_pcm_start_vc_trigger(struct stream_in_pcm *in)
+{
+ int ret = do_open_compress_pcm_in(in);
+ if (ret == 0) {
+ in->vc_trigger->compress = in->compress;
+ start_voice_control_wait(in->vc_trigger);
+ }
+}
+
+/* must be called with hw device and input stream mutexes locked */
+static void do_in_compress_pcm_standby(struct stream_in_pcm *in)
+{
+ struct compress *c;
+ int ret;
+
+ ALOGV("+do_in_compress_pcm_standby");
+
+ if (!in->common.standby) {
+ /* Always close, even for a voice control channel.
+ * For voice control we must close the channel after each command
+ */
+ c = in->compress;
+ in->compress = NULL;
+
+ if (in->vc_trigger != NULL) {
+ destroy_voice_control_trigger(in->vc_trigger, true);
+ in->vc_trigger = NULL;
+
+ /* For voice control we must re-open the channel to */
+ /* wait for next trigger */
+ init_voice_control(in);
+ do_in_compress_pcm_start_vc_trigger(in);
+ } else {
+ compress_stop(c);
+ compress_close(c);
+ }
+
+ }
+ in->common.standby = true;
+
+ ALOGV("-do_in_compress_pcm_standby");
+}
+
+/* Read PCM capture data from a compressed channel.
+ * Returns the number of bytes read, 0 when no data is available (no
+ * routes, or a voice control stream that has not triggered yet), or
+ * a negative error. Delivery is rate-limited because AudioFlinger
+ * cannot cope with bursty input data.
+ */
+static ssize_t do_in_compress_pcm_read(struct audio_stream_in *stream, void* buffer,
+ size_t bytes)
+{
+ struct stream_in_pcm *in = (struct stream_in_pcm *)stream;
+ struct audio_device *adev = in->common.dev;
+ /* removed unused local 'frames_rq' (dead code) */
+ nsecs_t t1;
+ nsecs_t t2;
+ nsecs_t interval;
+ struct timespec ts;
+ int ret = 0;
+
+ /* %zu: 'bytes' is a size_t (was logged with %d) */
+ ALOGV("+do_in_compress_pcm_read %zu", bytes);
+
+ if (get_current_routes(in->common.hw) == 0) {
+ ALOGV("-do_in_compress_pcm_read(%p) 0 (no routes)", stream);
+ return 0;
+ }
+
+ /* device lock is only needed while starting the stream; the stream
+ * lock is held for the whole read */
+ pthread_mutex_lock(&adev->lock);
+ pthread_mutex_lock(&in->common.lock);
+ ret = start_compress_pcm_input_stream(in);
+ pthread_mutex_unlock(&adev->lock);
+
+ if (ret < 0) {
+ goto exit;
+ }
+
+ if (in->vc_trigger != NULL) {
+ if (!in->vc_trigger->triggered) {
+ /* Read without trigger, no data will be available */
+ ALOGV("read without voice trigger - not returning data");
+ ret = 0;
+ goto exit;
+ }
+ }
+
+ t1 = systemTime(SYSTEM_TIME_MONOTONIC);
+ ret = compress_read(in->compress, buffer, bytes);
+ t2 = systemTime(SYSTEM_TIME_MONOTONIC);
+
+ if (ret > 0) {
+ /* The interface between AudioFlinger and AudioRecord cannot cope
+ * with bursty data and will lockup for periods if the data does
+ * not come as a smooth stream. So we must limit the rate that we
+ * deliver PCM buffers to approximately how long the buffer would
+ * have taken to read at its PCM sample rate
+ */
+
+ interval = (1000000000LL * (int64_t)ret) / (in->common.frame_size * in->common.sample_rate);
+ interval -= (interval / 4); /* wait for 75% of PCM time to avoid gaps */
+ t2 -= t1; /* elapsed interval */
+ if (interval > t2) {
+ ts.tv_sec = 0;
+ ts.tv_nsec = interval - t2;
+ nanosleep(&ts, NULL);
+ }
+ }
+
+ /*
+ * Instead of writing zeroes here, we could trust the hardware
+ * to always provide zeroes when muted.
+ */
+ if (ret == 0 && adev->mic_mute) {
+ memset(buffer, 0, bytes);
+ ret = bytes;
+ }
+
+exit:
+ pthread_mutex_unlock(&in->common.lock);
+
+ ALOGV("-do_in_compress_pcm_read (%d)", ret);
+ return ret;
+}
+
+/* Close a compressed-PCM input stream's channel.
+ * For a voice control stream the worker owns the channel and closes
+ * it as it terminates; otherwise the channel is stopped and closed
+ * directly here.
+ */
+static void do_in_compress_pcm_close(struct stream_in_pcm *in)
+{
+ ALOGV("+do_in_compress_pcm_close");
+
+ if (in->vc_trigger != NULL) {
+ destroy_voice_control_trigger(in->vc_trigger, true);
+ } else if (in->compress != NULL) {
+ compress_stop(in->compress);
+ compress_close(in->compress);
+ }
+
+ ALOGV("-do_in_compress_pcm_close");
+}
+
+/*********************************************************************
+ * PCM input stream
+ *********************************************************************/
+
+/* Capture period count: per-stream value from the configuration file
+ * if set (non-zero), otherwise IN_PERIOD_COUNT_DEFAULT. */
+static unsigned int in_pcm_cfg_period_count(struct stream_in_pcm *in)
+{
+ if (in->common.hw->period_count != 0) {
+ return in->common.hw->period_count;
+ } else {
+ return IN_PERIOD_COUNT_DEFAULT;
+ }
+}
+
+/* Capture period size: per-stream value from the configuration file
+ * if set (non-zero), otherwise IN_PERIOD_SIZE_DEFAULT. */
+static unsigned int in_pcm_cfg_period_size(struct stream_in_pcm *in)
+{
+ if (in->common.hw->period_size != 0) {
+ return in->common.hw->period_size;
+ } else {
+ return IN_PERIOD_SIZE_DEFAULT;
+ }
+}
+
+/* Capture sample rate: per-stream value from the configuration file
+ * if set (non-zero), otherwise IN_SAMPLING_RATE_DEFAULT. */
+static unsigned int in_pcm_cfg_rate(struct stream_in_pcm *in)
+{
+ if (in->common.hw->rate != 0) {
+ return in->common.hw->rate;
+ } else {
+ return IN_SAMPLING_RATE_DEFAULT;
+ }
+}
+
+/* must be called with hw device and input stream mutexes locked */
+/* Put a PCM input stream into standby: close the ALSA device and free
+ * the resampler and staging buffer.
+ * Must be called with hw device and input stream mutexes locked.
+ * The resampler/buffer cleanup is done unconditionally (both are
+ * NULL-checked) so a partially failed open is also cleaned up.
+ */
+static void do_in_pcm_standby(struct stream_in_pcm *in)
+{
+ struct audio_device *adev = in->common.dev;
+
+ ALOGV("+do_in_pcm_standby");
+
+ if (!in->common.standby) {
+ pcm_close(in->pcm);
+ in->pcm = NULL;
+ }
+ adev->active_in = NULL;
+ if (in->resampler) {
+ release_resampler(in->resampler);
+ in->resampler = NULL;
+ }
+ if (in->buffer) {
+ free(in->buffer);
+ in->buffer = NULL;
+ }
+ in->common.standby = true;
+
+ ALOGV("-do_in_pcm_standby");
+}
+
+/* Record the hardware parameters actually negotiated by pcm_open()
+ * and derive the client-visible buffer size from them.
+ */
+static void in_pcm_fill_params(struct stream_in_pcm *in,
+ const struct pcm_config *config )
+{
+ size_t size;
+
+ in->hw_sample_rate = config->rate;
+ in->hw_channel_count = config->channels;
+ in->period_size = config->period_size;
+
+ /*
+ * take resampling into account and return the closest majoring
+ * multiple of 16 frames, as audioflinger expects audio buffers to
+ * be a multiple of 16 frames
+ */
+ size = (config->period_size * in->common.sample_rate) / config->rate;
+ size = ((size + 15) / 16) * 16;
+ in->common.buffer_size = size * in->common.frame_size;
+
+}
+
+/* must be called with hw device and input stream mutexes locked */
+/* Open the ALSA PCM capture device for this stream and, when the
+ * client sample rate differs from the hardware rate, create a
+ * resampler plus a staging buffer for it.
+ * Must be called with hw device and input stream mutexes locked.
+ * Returns 0 on success or a negative error.
+ */
+static int do_open_pcm_input(struct stream_in_pcm *in)
+{
+ int ret;
+
+ /* NOTE(review): channel count is always the default mask; the
+ * client-requested channel count is not used here - confirm */
+ struct pcm_config config = {
+ .channels = popcount(IN_CHANNEL_MASK_DEFAULT),
+ .rate = in_pcm_cfg_rate(in),
+ .period_size = in_pcm_cfg_period_size(in),
+ .period_count = in_pcm_cfg_period_count(in),
+ .format = PCM_FORMAT_S16_LE,
+ };
+
+ config.start_threshold = config.period_size * config.period_count;
+
+ ALOGV("+do_open_pcm_input");
+
+ in->pcm = pcm_open(in->common.hw->card_number,
+ in->common.hw->device_number,
+ PCM_IN,
+ &config);
+
+ if (!in->pcm || !pcm_is_ready(in->pcm)) {
+ ALOGE_IF(in->pcm,"pcm_open(in) failed: %s", pcm_get_error(in->pcm));
+ ALOGE_IF(!in->pcm,"pcm_open(in) failed");
+ /* NOTE(review): -ENOMEM is a misleading code for an open
+ * failure, but callers only test ret < 0 */
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ in_pcm_fill_params( in, &config );
+
+ ALOGV("input buffer size=0x%x", in->common.buffer_size);
+
+ /*
+ * If the stream rate differs from the PCM rate, we need to
+ * create a resampler.
+ */
+ if (in_get_sample_rate(&in->common.stream.common) != config.rate) {
+ in->in_buffer_size = config.period_size * config.period_count *
+ config.channels * 2;
+ in->in_buffer_frames = in->in_buffer_size / (config.channels * 2);
+ in->buffer = malloc(in->in_buffer_size);
+
+ if (!in->buffer) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ in->buf_provider.get_next_buffer = get_next_buffer;
+ in->buf_provider.release_buffer = release_buffer;
+
+ ret = create_resampler(config.rate,
+ in->common.sample_rate,
+ in->common.channel_count,
+ RESAMPLER_QUALITY_DEFAULT,
+ &in->buf_provider,
+ &in->resampler);
+ if (ret < 0) {
+ goto fail;
+ }
+ }
+ ALOGV("-do_open_pcm_input");
+ return 0;
+
+fail:
+ /* free the staging buffer so a later re-open does not leak it
+ * (was leaked when create_resampler() failed) */
+ if (in->buffer) {
+ free(in->buffer);
+ in->buffer = NULL;
+ }
+ /* do not pass NULL to pcm_close() */
+ if (in->pcm) {
+ pcm_close(in->pcm);
+ in->pcm = NULL;
+ }
+ ALOGV("-do_open_pcm_input error:%d", ret);
+ return ret;
+}
+
+/* must be called with hw device and input stream mutexes locked */
+static int start_pcm_input_stream(struct stream_in_pcm *in)
+{
+ struct audio_device *adev = in->common.dev;
+ int ret;
+
+ ret = do_open_pcm_input(in);
+
+ if (ret < 0) {
+ return ret;
+ }
+
+ adev->active_in = in;
+ return 0;
+}
+
+/* Change the voice-control recognition locale.
+ * Must be called with the hw device mutex locked (per the _locked
+ * suffix). Only acts on streams that have a voice trigger; the
+ * trigger is torn down, a "locale" use-case is applied, then the
+ * trigger wait is re-armed.
+ * Returns 0, or -EINVAL if the stream is currently active.
+ * NOTE(review): failures of apply_use_case()/init_voice_control()
+ * are not propagated - the function still returns 0.
+ */
+static int change_input_locale_locked(struct stream_in_pcm *in, const char *locale)
+{
+ int ret;
+
+ if (in->vc_trigger) {
+ if (!in->common.standby) {
+ ALOGE("attempt to change input locale while active");
+ return -EINVAL;
+ }
+
+ ALOGE("change voice control locale to %s", locale);
+
+ destroy_voice_control_trigger(in->vc_trigger, true);
+
+ /* Execute locale-change use-case */
+ ret = apply_use_case(in->common.hw, "locale", locale);
+ if (ret == -ENOSYS) {
+ /* use-case not implemented.
+ * As we don't support the requested locale switch to default
+ * rather than potentially staying in a totally wrong language
+ */
+ apply_use_case(in->common.hw, "locale", "");
+ }
+
+ /* restart the trigger wait */
+ init_voice_control(in);
+ do_in_compress_pcm_start_vc_trigger(in);
+ }
+
+ return 0;
+}
+
+/* Switch the stream to a new Android input source.
+ * Must be called with the hw device mutex locked and the stream in
+ * standby. Special sources (currently only voice recognition) map to
+ * a named stream in the configuration; anything else re-opens the
+ * default stream for 'devices'. On success *was_changed reports
+ * whether the underlying hw stream was swapped.
+ * Returns 0 on success, -EINVAL if active or no stream is available.
+ */
+static int change_input_source_locked(struct stream_in_pcm *in, const char *value,
+ uint32_t devices, bool *was_changed)
+{
+ struct audio_device *adev = in->common.dev;
+ struct audio_config config;
+ const char *stream_name;
+ const struct hw_stream *hw;
+ bool voice_control = false;
+ const int new_source = atoi(value);
+
+ *was_changed = false;
+
+ if (!in->common.standby) {
+ ALOGE("attempt to change input source while active");
+ return -EINVAL;
+ }
+
+ if (in->common.input_source == new_source) {
+ ALOGV("input source not changed");
+ return 0;
+ }
+
+ /* Special input sources are obtained from the configuration
+ * by opening a named stream
+ */
+ switch (new_source) {
+ case AUDIO_SOURCE_VOICE_RECOGNITION:
+ /* We should verify here that current frame size, sample rate and
+ * channels are compatible
+ */
+
+ stream_name = "voice recognition";
+ voice_control = true;
+ break;
+
+ default:
+ stream_name = NULL;
+ break;
+ }
+
+ if (stream_name) {
+ hw = get_named_stream(in->common.dev->cm, stream_name);
+ ALOGV_IF(hw != NULL, "Changing input source to %s", stream_name);
+ } else {
+ /* request a stream matching the current client config */
+ memset(&config, 0, sizeof(config));
+ config.sample_rate = in->common.sample_rate;
+ config.channel_mask = in->common.channel_mask;
+ config.format = in->common.format;
+ hw = get_stream(in->common.dev->cm, devices, 0, &config);
+ ALOGV_IF(hw != NULL, "Changing to default input source for devices 0x%x",
+ devices);
+ }
+
+ if (hw != NULL) {
+ /* A normal stream will be in standby and therefore device node */
+ /* is closed when we get here. Only in case of a voice control */
+ /* stream will it still be open */
+ if (in->vc_trigger != NULL) {
+ destroy_voice_control_trigger(in->vc_trigger, true);
+ }
+ release_stream(in->common.hw);
+ in->common.hw = hw;
+
+ in->vc_trigger = NULL;
+
+ if (voice_control) {
+ /* Voice control wait will be started when AudioFlinger
+ * puts stream into standby
+ */
+ /* NOTE(review): init_voice_control() failure is ignored */
+ init_voice_control(in);
+
+ adev->active_voice_control = in;
+ } else if (adev->active_voice_control == in) {
+ adev->active_voice_control = NULL;
+ }
+ in->common.input_source = new_source;
+ *was_changed = true;
+ return 0;
+ } else {
+ ALOGV("Could not open new input stream");
+ return -EINVAL;
+ }
+}
+
+/* resampler_buffer_provider callback: refill the staging buffer from
+ * the PCM device when it is empty and hand out the unconsumed frames.
+ * Also folds stereo hardware capture down to mono (keeping the left
+ * channel) when the client stream is mono.
+ * Returns 0 on success or the pcm_read()/device error; on error
+ * buffer->raw is NULL and frame_count is 0.
+ */
+static int get_next_buffer(struct resampler_buffer_provider *buffer_provider,
+ struct resampler_buffer* buffer)
+{
+ struct stream_in_pcm *in;
+
+ if (buffer_provider == NULL || buffer == NULL) {
+ return -EINVAL;
+ }
+
+ /* recover the enclosing stream from the embedded provider struct */
+ in = (struct stream_in_pcm *)((char *)buffer_provider -
+ offsetof(struct stream_in_pcm, buf_provider));
+
+ if (in->pcm == NULL) {
+ buffer->raw = NULL;
+ buffer->frame_count = 0;
+ in->read_status = -ENODEV;
+ return -ENODEV;
+ }
+
+ if (in->frames_in == 0) {
+ in->read_status = pcm_read(in->pcm,
+ (void*)in->buffer,
+ in->in_buffer_size);
+ if (in->read_status != 0) {
+ ALOGE("get_next_buffer() pcm_read error %d", errno);
+ buffer->raw = NULL;
+ buffer->frame_count = 0;
+ return in->read_status;
+ }
+ in->frames_in = in->in_buffer_frames;
+ if ((in->common.channel_count == 1) && (in->hw_channel_count == 2)) {
+ unsigned int i;
+
+ /* Discard right channel */
+ /* frame 0 is already in place, so start at 1; sample i of
+ * the mono result is the left sample of stereo frame i */
+ for (i = 1; i < in->frames_in; i++) {
+ in->buffer[i] = in->buffer[i * 2];
+ }
+ }
+ }
+
+ /* hand out at most the frames remaining in the staging buffer */
+ buffer->frame_count = (buffer->frame_count > in->frames_in) ?
+ in->frames_in : buffer->frame_count;
+ buffer->i16 = (int16_t*)in->buffer + ((in->in_buffer_frames - in->frames_in));
+
+ return in->read_status;
+
+}
+
+/* resampler_buffer_provider callback: mark the frames handed out by
+ * get_next_buffer() as consumed.
+ */
+static void release_buffer(struct resampler_buffer_provider *buffer_provider,
+ struct resampler_buffer* buffer)
+{
+ struct stream_in_pcm *in;
+
+ if (buffer_provider == NULL || buffer == NULL)
+ return;
+
+ /* recover the enclosing stream from the embedded provider struct */
+ in = (struct stream_in_pcm *)((char *)buffer_provider -
+ offsetof(struct stream_in_pcm, buf_provider));
+
+ in->frames_in -= buffer->frame_count;
+}
+
+/* read_frames() reads frames from kernel driver, down samples to capture rate
+ * if necessary and output the number of frames requested to the buffer specified.
+ * Returns the number of frames written, or a negative error. */
+static ssize_t read_frames(struct stream_in_pcm *in, void *buffer, ssize_t frames)
+{
+ ssize_t frames_wr = 0;
+
+ while (frames_wr < frames) {
+ size_t frames_rd = frames - frames_wr;
+ if (in->resampler != NULL) {
+ /* resampler pulls source data via the buffer provider */
+ in->resampler->resample_from_provider(in->resampler,
+ (int16_t *)((char *)buffer +
+ frames_wr * in->common.frame_size),
+ &frames_rd);
+ } else {
+ /* no resampler: copy straight from the provider */
+ struct resampler_buffer buf = {
+ { raw : NULL, },
+ frame_count : frames_rd,
+ };
+ get_next_buffer(&in->buf_provider, &buf);
+ if (buf.raw != NULL) {
+ memcpy((char *)buffer +
+ frames_wr * in->common.frame_size,
+ buf.raw,
+ buf.frame_count * in->common.frame_size);
+ frames_rd = buf.frame_count;
+ }
+ release_buffer(&in->buf_provider, &buf);
+ }
+ /* in->read_status is updated by get_next_buffer(), also called by
+ * in->resampler->resample_from_provider() */
+ if (in->read_status != 0)
+ return in->read_status;
+
+ frames_wr += frames_rd;
+ }
+ return frames_wr;
+}
+
+/* Read (and, if needed, resample) PCM capture data.
+ * Returns the number of bytes delivered or a negative error. On error
+ * the call sleeps for the duration the read would have taken so the
+ * caller does not spin.
+ */
+static ssize_t do_in_pcm_read(struct audio_stream_in *stream, void* buffer,
+ size_t bytes)
+{
+ int ret = 0;
+ struct stream_in_pcm *in = (struct stream_in_pcm *)stream;
+ struct audio_device *adev = in->common.dev;
+ size_t frames_rq = bytes / in->common.frame_size;
+
+ /* %zu: 'bytes' is a size_t (was logged with %d) */
+ ALOGV("+in_pcm_read %zu", bytes);
+
+ if (get_current_routes(in->common.hw) == 0) {
+ ALOGV("-in_pcm_read(%p) 0 (no routes)", stream);
+ return 0;
+ }
+
+ /*
+ * acquiring hw device mutex systematically is useful if a low
+ * priority thread is waiting on the input stream mutex - e.g.
+ * executing in_set_parameters() while holding the hw device
+ * mutex
+ */
+ pthread_mutex_lock(&adev->lock);
+ pthread_mutex_lock(&in->common.lock);
+ if (in->common.standby) {
+ ret = start_pcm_input_stream(in);
+ if (ret == 0) {
+ in->common.standby = 0;
+ }
+ }
+ pthread_mutex_unlock(&adev->lock);
+
+ if (ret < 0) {
+ goto exit;
+ }
+
+ if (in->resampler != NULL) {
+ ret = read_frames(in, buffer, frames_rq);
+ } else {
+ ret = pcm_read(in->pcm, buffer, bytes);
+ }
+
+ /*
+ * Instead of writing zeroes here, we could trust the hardware
+ * to always provide zeroes when muted.
+ * Was 'ret == 0', which only muted the direct pcm_read() path
+ * (pcm_read() returns 0 on success); read_frames() returns a
+ * positive frame count, so the resampled path was never muted.
+ */
+ if (ret >= 0 && adev->mic_mute) {
+ memset(buffer, 0, bytes);
+ }
+
+ if (ret >= 0) {
+ ret = bytes;
+ }
+
+exit:
+ if (ret < 0) {
+ /* pace the caller so it does not busy-loop on errors */
+ usleep(bytes * 1000000 / in->common.frame_size /
+ in->common.sample_rate);
+ }
+
+ pthread_mutex_unlock(&in->common.lock);
+
+ ALOGV("-in_pcm_read (%d)", ret);
+ return ret;
+}
+
+/* audio_stream standby entry point for input streams: dispatches to
+ * the compressed or plain PCM standby handler under both locks.
+ * Always returns 0.
+ */
+static int in_pcm_standby(struct audio_stream *stream)
+{
+ struct stream_in_pcm *in = (struct stream_in_pcm *)stream;
+
+ pthread_mutex_lock(&in->common.dev->lock);
+ pthread_mutex_lock(&in->common.lock);
+
+ if (stream_is_compressed_in(in->common.hw)) {
+ do_in_compress_pcm_standby(in);
+ } else {
+ do_in_pcm_standby(in);
+ }
+
+ pthread_mutex_unlock(&in->common.lock);
+ pthread_mutex_unlock(&in->common.dev->lock);
+
+ return 0;
+}
+
+/* audio_stream_in read entry point: dispatches to the compressed or
+ * plain PCM read path depending on the underlying hw stream type.
+ */
+static ssize_t in_pcm_read(struct audio_stream_in *stream, void* buffer,
+ size_t bytes)
+{
+ struct stream_in_pcm *in = (struct stream_in_pcm *)stream;
+
+ if (stream_is_compressed_in(in->common.hw)) {
+ return do_in_compress_pcm_read(stream, buffer, bytes);
+ } else {
+ return do_in_pcm_read(stream, buffer, bytes);
+ }
+}
+
+/* Handle set_parameters on an input stream: input-source changes
+ * (which may swap the underlying hw stream) and routing changes.
+ * The voice control trigger wait is only armed after routing has
+ * been applied to the new stream.
+ * NOTE(review): if kvpairs contains neither a routing key nor an
+ * input source key, the negative value from
+ * common_get_routing_param() is returned - confirm intended.
+ */
+static int in_pcm_set_parameters(struct audio_stream *stream, const char *kvpairs)
+{
+ struct stream_in_pcm *in = (struct stream_in_pcm *)stream;
+ struct audio_device *adev = in->common.dev;
+ struct str_parms *parms;
+ char value[32];
+ uint32_t new_routing = 0;
+ bool routing_changed;
+ uint32_t devices;
+ bool input_was_changed;
+ bool start_vc_trig = false;
+ int ret;
+
+ ALOGV("+in_pcm_set_parameters(%p) '%s'", stream, kvpairs);
+
+ ret = common_get_routing_param(&new_routing, kvpairs);
+ routing_changed = (ret >= 0);
+ parms = str_parms_create_str(kvpairs);
+
+ pthread_mutex_lock(&adev->lock);
+
+ if(str_parms_get_str(parms, AUDIO_PARAMETER_STREAM_INPUT_SOURCE,
+ value, sizeof(value)) >= 0) {
+
+ if (routing_changed) {
+ devices = new_routing;
+ } else {
+ /* Route new stream to same devices as current stream */
+ devices = get_routed_devices(in->common.hw);
+ }
+
+ ret = change_input_source_locked(in, value, devices, &input_was_changed);
+ if (ret < 0) {
+ goto out;
+ }
+
+ /* We must apply any existing routing to the new stream */
+ new_routing = devices;
+ routing_changed = true;
+
+ /* Defer starting the voice control trigger wait until */
+ /* the routing has been set up */
+ if ((in->vc_trigger != NULL) && (input_was_changed)) {
+ start_vc_trig = true;
+ }
+ }
+
+ if (routing_changed) {
+ ALOGV("Apply routing=0x%x to input stream", new_routing);
+ apply_route(in->common.hw, new_routing);
+ ret = 0;
+ }
+
+ if (start_vc_trig) {
+ do_in_compress_pcm_start_vc_trigger(in);
+ }
+
+ common_set_parameters_locked(in->common.hw, kvpairs);
+
+out:
+ pthread_mutex_unlock(&adev->lock);
+ str_parms_destroy(parms);
+
+ ALOGV("-in_pcm_set_parameters(%p):%d", stream, ret);
+ return ret;
+}
+
+/* Close handler for PCM input streams: tear down the compressed
+ * channel first (if any) then run the common close path.
+ * NOTE(review): this tests stream_is_compressed() while the
+ * standby/read paths test stream_is_compressed_in() - confirm the
+ * two predicates agree for input streams.
+ */
+static void do_close_in_pcm(struct audio_stream *stream)
+{
+ struct stream_in_pcm *in = (struct stream_in_pcm *)stream;
+
+ if (stream_is_compressed(in->common.hw)) {
+ do_in_compress_pcm_close(in);
+ }
+ do_close_in_common(stream);
+}
+
+/* PCM-specific initialization of an input stream: installs the PCM
+ * callbacks and sets a provisional buffer size (refined later when
+ * the device is actually opened). 'config' is currently unused.
+ * Returns 0 (currently cannot fail).
+ */
+static int do_init_in_pcm( struct stream_in_pcm *in,
+ struct audio_config *config )
+{
+ in->common.close = do_close_in_pcm;
+ in->common.stream.common.standby = in_pcm_standby;
+ in->common.stream.common.set_parameters = in_pcm_set_parameters;
+ in->common.stream.read = in_pcm_read;
+
+ /* Default settings for stereo capture */
+ /* NOTE(review): the '* 2' appears to be bytes per 16-bit sample,
+ * not a channel count - confirm */
+ in->common.buffer_size = in_pcm_cfg_period_size(in)
+ * in_pcm_cfg_period_count(in)
+ * 2;
+
+ /* cap so compressed-capture buffers stay bounded */
+ if (in->common.buffer_size > IN_COMPRESS_BUFFER_SIZE_DEFAULT) {
+ in->common.buffer_size = IN_COMPRESS_BUFFER_SIZE_DEFAULT;
+ }
+
+ return 0;
+}
+
+/*********************************************************************
+ * Stream open and close
+ *********************************************************************/
+static int adev_open_output_stream(struct audio_hw_device *dev,
+ audio_io_handle_t handle,
+ audio_devices_t devices,
+ audio_output_flags_t flags,
+ struct audio_config *config,
+ struct audio_stream_out **stream_out)
+{
+ struct audio_device *adev = (struct audio_device *)dev;
+ union {
+ struct stream_out_common *common;
+ struct stream_out_pcm *pcm;
+ } out;
+ int ret;
+
+ ALOGV("+adev_open_output_stream");
+
+ devices &= AUDIO_DEVICE_OUT_ALL;
+ const struct hw_stream *hw = get_stream(adev->cm, devices, flags, config);
+ if (!hw) {
+ ALOGE("No suitable output stream for devices=0x%x flags=0x%x format=0x%x",
+ devices, flags, config->format );
+ ret = -EINVAL;
+ goto err_fail;
+ }
+
+ out.common = calloc(1, sizeof(struct stream_out_pcm));
+ if (!out.common) {
+ ret = -ENOMEM;
+ goto err_fail;
+ }
+
+ out.common->dev = adev;
+ out.common->hw = hw;
+ ret = do_init_out_common( out.common, config, devices );
+ if (ret < 0) {
+ goto err_open;
+ }
+
+ ret = do_init_out_pcm( out.pcm, config );
+ if (ret < 0) {
+ goto err_open;
+ }
+
+ /* Update config with initial stream settings */
+ config->format = out.common->format;
+ config->channel_mask = out.common->channel_mask;
+ config->sample_rate = out.common->sample_rate;
+
+ *stream_out = &out.common->stream;
+ ALOGV("-adev_open_output_stream=%p", *stream_out);
return 0;
err_open:
- free(out);
+ free(out.common);
*stream_out = NULL;
+err_fail:
+ ALOGV("-adev_open_output_stream (%d)", ret);
return ret;
}
static void adev_close_output_stream(struct audio_hw_device *dev,
struct audio_stream_out *stream)
{
- struct tiny_stream_out *out = (struct tiny_stream_out *)stream;
- LOGV("Closing output stream %p\n", stream);
- if (out->pcm)
- pcm_close(out->pcm);
- free(stream);
+ struct stream_out_common *out = (struct stream_out_common *)stream;
+ ALOGV("adev_close_output_stream(%p)", stream);
+ (out->close)(&stream->common);
}
+/* HAL entry point: open an input stream.
+ * Only mono capture is supported: anything else is rejected with
+ * -EINVAL after writing the supported mono mask back into *config so
+ * AudioFlinger can retry. Looks up a suitable stream in the config
+ * manager then runs the common and PCM-specific initializers.
+ */
+static int adev_open_input_stream(struct audio_hw_device *dev,
+ audio_io_handle_t handle,
+ audio_devices_t devices,
+ struct audio_config *config,
+ struct audio_stream_in **stream_in)
+{
+ struct audio_device *adev = (struct audio_device *)dev;
+ struct stream_in_pcm *in = NULL;
+ const struct hw_stream *hw = NULL;
+ int ret;
+
+ ALOGV("+adev_open_input_stream");
+
+ *stream_in = NULL;
+
+ /* Respond with a request for mono if a different format is given. */
+ if (config->channel_mask != AUDIO_CHANNEL_IN_MONO) {
+ config->channel_mask = AUDIO_CHANNEL_IN_MONO;
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ devices &= AUDIO_DEVICE_IN_ALL;
+ hw = get_stream(adev->cm, devices, 0, config);
+ if (!hw) {
+ ALOGE("No suitable input stream for devices=0x%x format=0x%x",
+ devices, config->format );
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ in = (struct stream_in_pcm *)calloc(1, sizeof(struct stream_in_pcm));
+ if (!in) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ in->common.dev = adev;
+ in->common.hw = hw;
+ ret = do_init_in_common( &in->common, config, devices );
+ if (ret < 0) {
+ goto fail;
+ }
+ ret = do_init_in_pcm( in, config );
+ if (ret < 0) {
+ goto fail;
+ }
+
+ *stream_in = &in->common.stream;
+ return 0;
+
+fail:
+ free(in);
+ /* was free((void *)hw): streams obtained from the config manager
+ * must be returned with release_stream() (as done in
+ * do_close_in_common()), not freed directly */
+ if (hw != NULL) {
+ release_stream(hw);
+ }
+ ALOGV("-adev_open_input_stream (%d)", ret);
+ return ret;
+}
+
+/* HAL entry point: close an input stream by invoking the per-type
+ * close handler installed at init (do_close_in_pcm /
+ * do_close_in_common), which also frees the stream.
+ */
+static void adev_close_input_stream(struct audio_hw_device *dev,
+ struct audio_stream_in *stream)
+{
+ struct stream_in_common *in = (struct stream_in_common *)stream;
+ ALOGV("adev_close_input_stream(%p)", stream);
+ (in->close)(&stream->common);
+}
+
+/*********************************************************************
+ * Global API functions
+ *********************************************************************/
static int adev_set_parameters(struct audio_hw_device *dev, const char *kvpairs)
{
- return -ENOSYS;
+ /* Handles the global parameters: "orientation" (re-routes streams
+ * that depend on device orientation), "screen_state", and "locale"
+ * (forwarded to the active voice control stream).
+ * NOTE(review): the returned value is whatever the LAST
+ * str_parms_get_str() produced, so a kvpairs without "locale"
+ * returns an error even if earlier keys were handled - confirm.
+ */
+ struct audio_device *adev = (struct audio_device *)dev;
+ struct str_parms *parms;
+ char value[32];
+ int ret;
+
+ parms = str_parms_create_str(kvpairs);
+ ret = str_parms_get_str(parms, "orientation", value, sizeof(value));
+ if (ret >= 0) {
+ int orientation;
+
+ if (strcmp(value, "landscape") == 0)
+ orientation = ORIENTATION_LANDSCAPE;
+ else if (strcmp(value, "portrait") == 0)
+ orientation = ORIENTATION_PORTRAIT;
+ else if (strcmp(value, "square") == 0)
+ orientation = ORIENTATION_SQUARE;
+ else
+ orientation = ORIENTATION_UNDEFINED;
+
+ pthread_mutex_lock(&adev->lock);
+ if (orientation != adev->orientation) {
+ adev->orientation = orientation;
+ /* Change routing for any streams that change with orientation */
+ rotate_routes(adev->cm, orientation);
+ }
+ pthread_mutex_unlock(&adev->lock);
+ }
+
+ ret = str_parms_get_str(parms, "screen_state", value, sizeof(value));
+ if (ret >= 0) {
+ /* NOTE(review): written without adev->lock - confirm benign */
+ if (strcmp(value, AUDIO_PARAMETER_VALUE_ON) == 0)
+ adev->screen_off = false;
+ else
+ adev->screen_off = true;
+ }
+
+ /* locale changes are only relevant to the voice control stream but
+ * Android does not expose the per-stream setParameters() ability to
+ * Java apps so we must handle this through the global setParameters
+ */
+ if(str_parms_get_str(parms, "locale", value, sizeof(value)) >= 0) {
+ /* was called without the device lock: change_input_locale_locked()
+ * requires adev->lock held (the _locked suffix), and
+ * active_voice_control may change concurrently */
+ pthread_mutex_lock(&adev->lock);
+ if (adev->active_voice_control) {
+ ret = change_input_locale_locked(adev->active_voice_control, value);
+ }
+ pthread_mutex_unlock(&adev->lock);
+ }
+
+ str_parms_destroy(parms);
+ return ret;
}
static char * adev_get_parameters(const struct audio_hw_device *dev,
const char *keys)
{
- return NULL;
+ return strdup("");
}
static int adev_init_check(const struct audio_hw_device *dev)
@@ -510,92 +1964,38 @@
return -ENOSYS;
}
-static int adev_set_mode(struct audio_hw_device *dev, int mode)
+static int adev_set_mode(struct audio_hw_device *dev, audio_mode_t mode)
{
return 0;
}
static int adev_set_mic_mute(struct audio_hw_device *dev, bool state)
{
- return -ENOSYS;
+ struct audio_device *adev = (struct audio_device *)dev;
+
+ adev->mic_mute = state;
+
+ return 0;
}
static int adev_get_mic_mute(const struct audio_hw_device *dev, bool *state)
{
- return -ENOSYS;
+ struct audio_device *adev = (struct audio_device *)dev;
+
+ *state = adev->mic_mute;
+
+ return 0;
}
static size_t adev_get_input_buffer_size(const struct audio_hw_device *dev,
- uint32_t sample_rate, int format,
- int channel_count)
+ const struct audio_config *config)
{
- return 320;
-}
+ size_t s = IN_PERIOD_SIZE_DEFAULT * IN_PERIOD_COUNT_DEFAULT * 2;
+ if (s > IN_COMPRESS_BUFFER_SIZE_DEFAULT) {
+ s = IN_COMPRESS_BUFFER_SIZE_DEFAULT;
+ }
-static int adev_open_input_stream(struct audio_hw_device *dev, uint32_t devices,
- int *format, uint32_t *channels,
- uint32_t *sample_rate,
- audio_in_acoustics_t acoustics,
- struct audio_stream_in **stream_in)
-{
- struct tiny_audio_device *adev = (struct tiny_audio_device *)dev;
- struct tiny_stream_in *in;
- int ret;
- int channel_count = popcount(*channels);
-
- in = calloc(1, sizeof(struct tiny_stream_in));
- if (!in)
- return -ENOMEM;
-
- pthread_mutex_init(&in->lock, NULL);
- in->adev = adev;
-
- in->stream.common.get_sample_rate = in_get_sample_rate;
- in->stream.common.set_sample_rate = in_set_sample_rate;
- in->stream.common.get_buffer_size = in_get_buffer_size;
- in->stream.common.get_channels = in_get_channels;
- in->stream.common.get_format = in_get_format;
- in->stream.common.set_format = in_set_format;
- in->stream.common.standby = in_standby;
- in->stream.common.dump = in_dump;
- in->stream.common.set_parameters = in_set_parameters;
- in->stream.common.get_parameters = in_get_parameters;
- in->stream.common.add_audio_effect = in_add_audio_effect;
- in->stream.common.remove_audio_effect = in_remove_audio_effect;
- in->stream.set_gain = in_set_gain;
- in->stream.read = in_read;
- in->stream.get_input_frames_lost = in_get_input_frames_lost;
-
- pthread_mutex_lock(&adev->route_lock);
- adev->devices &= ~AUDIO_DEVICE_IN_ALL;
- adev->devices |= devices;
- select_devices(adev);
- pthread_mutex_unlock(&adev->route_lock);
-
- in->config.channels = 2;
- in->config.rate = 44100;
- in->config.period_count = 4;
- in->config.period_size = 320;
- in->config.format = PCM_FORMAT_S16_LE;
-
- *stream_in = &in->stream;
- return 0;
-
-err_open:
- free(in);
- *stream_in = NULL;
- return ret;
-}
-
-static void adev_close_input_stream(struct audio_hw_device *dev,
- struct audio_stream_in *stream)
-{
- struct tiny_stream_in *in = (struct tiny_stream_in *)stream;
-
- if (in->pcm)
- pcm_close(in->pcm);
- free(in);
- return;
+ return s;
}
static int adev_dump(const audio_hw_device_t *device, int fd)
@@ -605,306 +2005,72 @@
static int adev_close(hw_device_t *device)
{
+ struct audio_device *adev = (struct audio_device *)device;
+
+ free_audio_config(adev->cm);
+
free(device);
return 0;
}
-static uint32_t adev_get_supported_devices(const struct audio_hw_device *dev)
-{
- struct tiny_audio_device *adev = (struct tiny_audio_device *)dev;
- uint32_t supported = 0;
- int i;
-
- for (i = 0; i < adev->num_dev_cfgs; i++)
- supported |= adev->dev_cfgs[i].mask;
-
- return supported;
-}
-
-struct config_parse_state {
- struct tiny_audio_device *adev;
- struct tiny_dev_cfg *dev;
- bool on;
-
- struct route_setting *path;
- unsigned int path_len;
-};
-
-static const struct {
- int mask;
- const char *name;
-} dev_names[] = {
- { AUDIO_DEVICE_OUT_SPEAKER, "speaker" },
- { AUDIO_DEVICE_OUT_WIRED_HEADSET | AUDIO_DEVICE_OUT_WIRED_HEADPHONE,
- "headphone" },
- { AUDIO_DEVICE_OUT_EARPIECE, "earpiece" },
- { AUDIO_DEVICE_OUT_ANLG_DOCK_HEADSET, "analog-dock" },
- { AUDIO_DEVICE_OUT_DGTL_DOCK_HEADSET, "digital-dock" },
-
- { AUDIO_DEVICE_IN_COMMUNICATION, "comms" },
- { AUDIO_DEVICE_IN_AMBIENT, "ambient" },
- { AUDIO_DEVICE_IN_BUILTIN_MIC, "builtin-mic" },
- { AUDIO_DEVICE_IN_WIRED_HEADSET, "headset" },
- { AUDIO_DEVICE_IN_AUX_DIGITAL, "digital" },
- { AUDIO_DEVICE_IN_BACK_MIC, "back-mic" },
-};
-
-static void adev_config_start(void *data, const XML_Char *elem,
- const XML_Char **attr)
-{
- struct config_parse_state *s = data;
- struct tiny_dev_cfg *dev_cfg;
- const XML_Char *name = NULL;
- const XML_Char *val = NULL;
- unsigned int i, j;
-
- for (i = 0; attr[i]; i += 2) {
- if (strcmp(attr[i], "name") == 0)
- name = attr[i + 1];
-
- if (strcmp(attr[i], "val") == 0)
- val = attr[i + 1];
- }
-
- if (strcmp(elem, "device") == 0) {
- if (!name) {
- LOGE("Unnamed device\n");
- return;
- }
-
- for (i = 0; i < sizeof(dev_names) / sizeof(dev_names[0]); i++) {
- if (strcmp(dev_names[i].name, name) == 0) {
- LOGI("Allocating device %s\n", name);
- dev_cfg = realloc(s->adev->dev_cfgs,
- (s->adev->num_dev_cfgs + 1)
- * sizeof(*dev_cfg));
- if (!dev_cfg) {
- LOGE("Unable to allocate dev_cfg\n");
- return;
- }
-
- s->dev = &dev_cfg[s->adev->num_dev_cfgs];
- memset(s->dev, 0, sizeof(*s->dev));
- s->dev->mask = dev_names[i].mask;
-
- s->adev->dev_cfgs = dev_cfg;
- s->adev->num_dev_cfgs++;
- }
- }
-
- } else if (strcmp(elem, "path") == 0) {
- if (s->path_len)
- LOGW("Nested paths\n");
-
- /* If this a path for a device it must have a role */
- if (s->dev) {
- /* Need to refactor a bit... */
- if (strcmp(name, "on") == 0) {
- s->on = true;
- } else if (strcmp(name, "off") == 0) {
- s->on = false;
- } else {
- LOGW("Unknown path name %s\n", name);
- }
- }
-
- } else if (strcmp(elem, "ctl") == 0) {
- struct route_setting *r;
-
- if (!name) {
- LOGE("Unnamed control\n");
- return;
- }
-
- if (!val) {
- LOGE("No value specified for %s\n", name);
- return;
- }
-
- LOGV("Parsing control %s => %s\n", name, val);
-
- r = realloc(s->path, sizeof(*r) * (s->path_len + 1));
- if (!r) {
- LOGE("Out of memory handling %s => %s\n", name, val);
- return;
- }
-
- r[s->path_len].ctl_name = strdup(name);
- r[s->path_len].strval = NULL;
-
- /* This can be fooled but it'll do */
- r[s->path_len].intval = atoi(val);
- if (!r[s->path_len].intval && strcmp(val, "0") != 0)
- r[s->path_len].strval = strdup(val);
-
- s->path = r;
- s->path_len++;
- }
-}
-
-static void adev_config_end(void *data, const XML_Char *name)
-{
- struct config_parse_state *s = data;
- unsigned int i;
-
- if (strcmp(name, "path") == 0) {
- if (!s->path_len)
- LOGW("Empty path\n");
-
- if (!s->dev) {
- LOGV("Applying %d element default route\n", s->path_len);
-
- set_route_by_array(s->adev->mixer, s->path, s->path_len);
-
- for (i = 0; i < s->path_len; i++) {
- free(s->path[i].ctl_name);
- free(s->path[i].strval);
- }
-
- free(s->path);
-
- /* Refactor! */
- } else if (s->on) {
- LOGV("%d element on sequence\n", s->path_len);
- s->dev->on = s->path;
- s->dev->on_len = s->path_len;
-
- } else {
- LOGV("%d element off sequence\n", s->path_len);
-
- /* Apply it, we'll reenable anything that's wanted later */
- set_route_by_array(s->adev->mixer, s->path, s->path_len);
-
- s->dev->off = s->path;
- s->dev->off_len = s->path_len;
- }
-
- s->path_len = 0;
- s->path = NULL;
-
- } else if (strcmp(name, "device") == 0) {
- s->dev = NULL;
- }
-}
-
-static int adev_config_parse(struct tiny_audio_device *adev)
-{
- struct config_parse_state s;
- FILE *f;
- XML_Parser p;
- char property[PROPERTY_VALUE_MAX];
- char file[80];
- int ret = 0;
- bool eof = false;
- int len;
-
- property_get("ro.product.device", property, "tiny_hw");
- snprintf(file, sizeof(file), "/system/etc/sound/%s", property);
-
- LOGV("Reading configuration from %s\n", file);
- f = fopen(file, "r");
- if (!f) {
- LOGE("Failed to open %s\n", file);
- return -ENODEV;
- }
-
- p = XML_ParserCreate(NULL);
- if (!p) {
- LOGE("Failed to create XML parser\n");
- ret = -ENOMEM;
- goto out;
- }
-
- memset(&s, 0, sizeof(s));
- s.adev = adev;
- XML_SetUserData(p, &s);
-
- XML_SetElementHandler(p, adev_config_start, adev_config_end);
-
- while (!eof) {
- len = fread(file, 1, sizeof(file), f);
- if (ferror(f)) {
- LOGE("I/O error reading config\n");
- ret = -EIO;
- goto out_parser;
- }
- eof = feof(f);
-
- if (XML_Parse(p, file, len, eof) == XML_STATUS_ERROR) {
- LOGE("Parse error at line %u:\n%s\n",
- (unsigned int)XML_GetCurrentLineNumber(p),
- XML_ErrorString(XML_GetErrorCode(p)));
- ret = -EINVAL;
- goto out_parser;
- }
- }
-
- out_parser:
- XML_ParserFree(p);
- out:
- fclose(f);
-
- return ret;
-}
-
static int adev_open(const hw_module_t* module, const char* name,
hw_device_t** device)
{
- struct tiny_audio_device *adev;
+ struct audio_device *adev;
int ret;
if (strcmp(name, AUDIO_HARDWARE_INTERFACE) != 0)
return -EINVAL;
- adev = calloc(1, sizeof(struct tiny_audio_device));
+ adev = calloc(1, sizeof(struct audio_device));
if (!adev)
return -ENOMEM;
- adev->device.common.tag = HARDWARE_DEVICE_TAG;
- adev->device.common.version = 0;
- adev->device.common.module = (struct hw_module_t *) module;
- adev->device.common.close = adev_close;
+ adev->hw_device.common.tag = HARDWARE_DEVICE_TAG;
+ adev->hw_device.common.version = AUDIO_DEVICE_API_VERSION_2_0;
+ adev->hw_device.common.module = (struct hw_module_t *) module;
+ adev->hw_device.common.close = adev_close;
- adev->device.get_supported_devices = adev_get_supported_devices;
- adev->device.init_check = adev_init_check;
- adev->device.set_voice_volume = adev_set_voice_volume;
- adev->device.set_master_volume = adev_set_master_volume;
- adev->device.set_mode = adev_set_mode;
- adev->device.set_mic_mute = adev_set_mic_mute;
- adev->device.get_mic_mute = adev_get_mic_mute;
- adev->device.set_parameters = adev_set_parameters;
- adev->device.get_parameters = adev_get_parameters;
- adev->device.get_input_buffer_size = adev_get_input_buffer_size;
- adev->device.open_output_stream = adev_open_output_stream;
- adev->device.close_output_stream = adev_close_output_stream;
- adev->device.open_input_stream = adev_open_input_stream;
- adev->device.close_input_stream = adev_close_input_stream;
- adev->device.dump = adev_dump;
+ adev->hw_device.init_check = adev_init_check;
+ adev->hw_device.set_voice_volume = adev_set_voice_volume;
+ adev->hw_device.set_master_volume = adev_set_master_volume;
+ adev->hw_device.set_mode = adev_set_mode;
+ adev->hw_device.set_mic_mute = adev_set_mic_mute;
+ adev->hw_device.get_mic_mute = adev_get_mic_mute;
+ adev->hw_device.set_parameters = adev_set_parameters;
+ adev->hw_device.get_parameters = adev_get_parameters;
+ adev->hw_device.get_input_buffer_size = adev_get_input_buffer_size;
+ adev->hw_device.open_output_stream = adev_open_output_stream;
+ adev->hw_device.close_output_stream = adev_close_output_stream;
+ adev->hw_device.open_input_stream = adev_open_input_stream;
+ adev->hw_device.close_input_stream = adev_close_input_stream;
+ adev->hw_device.dump = adev_dump;
- adev->mixer = mixer_open(0);
- if (!adev->mixer) {
- LOGE("Failed to open mixer 0\n");
- goto err;
+ adev->cm = init_audio_config();
+ if (!adev->cm) {
+ ret = -EINVAL;
+ goto fail;
}
-
- ret = adev_config_parse(adev);
- if (ret != 0)
- goto err_mixer;
- /* Bootstrap routing */
- pthread_mutex_init(&adev->route_lock, NULL);
- adev->mode = AUDIO_MODE_NORMAL;
- adev->devices = AUDIO_DEVICE_OUT_SPEAKER | AUDIO_DEVICE_IN_BUILTIN_MIC;
- select_devices(adev);
+ if (is_named_stream_defined(adev->cm, "voice recognition")) {
+ ret = init_voice_trigger_service();
+ if (ret != 0) {
+ goto fail;
+ }
+ }
- *device = &adev->device.common;
+ adev->orientation = ORIENTATION_UNDEFINED;
+ *device = &adev->hw_device.common;
return 0;
-err_mixer:
- mixer_close(adev->mixer);
-err:
- return -EINVAL;
+fail:
+ if (adev->cm) {
+ /*free_audio_config(adev->cm);*/ /* Currently broken */
+ }
+
+ free(adev);
+ return ret;
}
static struct hw_module_methods_t hal_module_methods = {
@@ -914,11 +2080,11 @@
struct audio_module HAL_MODULE_INFO_SYM = {
.common = {
.tag = HARDWARE_MODULE_TAG,
- .version_major = 1,
- .version_minor = 0,
+ .module_api_version = AUDIO_MODULE_API_VERSION_0_1,
+ .hal_api_version = HARDWARE_HAL_API_VERSION,
.id = AUDIO_HARDWARE_MODULE_ID,
.name = "TinyHAL",
- .author = "Mark Brown <broonie@opensource.wolfsonmicro.com>",
+ .author = "Richard Fitzgerald <rf@opensource.wolfsonmicro.com>",
.methods = &hal_module_methods,
},
};
diff --git a/voice_trigger.cpp b/voice_trigger.cpp
new file mode 100644
index 0000000..a33781e
--- /dev/null
+++ b/voice_trigger.cpp
@@ -0,0 +1,91 @@
+/*
+ * Copyright (C) 2013 Wolfson Microelectronics plc
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "tinyhal-vctrig"
+/*#define LOG_NDEBUG 0*/
+
+#include <errno.h>
+#include <pthread.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <sys/time.h>
+
+#include <cutils/log.h>
+#include <sysutils/FrameworkListener.h>
+#include <sysutils/FrameworkCommand.h>
+#include <utils/Mutex.h>
+#include <utils/Condition.h>
+
+#include "voice_trigger.h"
+
+static CVoiceTriggerService* TriggerService;
+
+CVoiceTriggerCommand::CVoiceTriggerCommand()
+ : FrameworkCommand("wait")
+{
+}
+
+CVoiceTriggerCommand::~CVoiceTriggerCommand()
+{
+}
+
+int CVoiceTriggerCommand::runCommand(SocketClient *c, int argc, char **argv)
+{
+ return 0;
+}
+
+CVoiceTriggerService::CVoiceTriggerService()
+ : FrameworkListener("voice-trigger", true)
+{
+ registerCmd(&mTriggerCommand);
+}
+
+CVoiceTriggerService::~CVoiceTriggerService()
+{
+}
+
+
+// As the rest of TinyHAL is currently written in C, we provide a
+// C interface to the voice trigger service.
+
+extern "C"
+void send_voice_trigger()
+{
+ if (TriggerService != NULL) {
+ ALOGV("trigger");
+ TriggerService->sendBroadcast(0, "trig", 0);
+ }
+}
+
+extern "C"
+int init_voice_trigger_service()
+{
+ ALOGV("init_voice_trigger_service");
+
+ CVoiceTriggerService *svc = new CVoiceTriggerService;
+ if (svc->startListener() != 0) {
+ int ret = errno;
+ delete svc;
+ return ret;
+ }
+
+ TriggerService = svc;
+ return 0;
+}
+
+
+
+
diff --git a/voice_trigger.h b/voice_trigger.h
new file mode 100644
index 0000000..43507b3
--- /dev/null
+++ b/voice_trigger.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright (C) 2012-13 Wolfson Microelectronics plc
+ *
+ * This code is heavily based on AOSP HAL for the asus/grouper
+ *
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef VOICE_TRIGGER_H
+#define VOICE_TRIGGER_H
+
+#if defined(__cplusplus)
+#include <sysutils/FrameworkListener.h>
+#include <sysutils/FrameworkCommand.h>
+#include <utils/Mutex.h>
+#include <utils/Condition.h>
+
+class CVoiceTriggerCommand : public FrameworkCommand
+{
+public:
+ CVoiceTriggerCommand();
+ virtual ~CVoiceTriggerCommand();
+
+private:
+ int runCommand(SocketClient *c, int argc, char **argv);
+};
+
+class CVoiceTriggerService : public FrameworkListener
+{
+public:
+ CVoiceTriggerService();
+ virtual ~CVoiceTriggerService();
+
+public:
+ CVoiceTriggerCommand mTriggerCommand;
+};
+
+extern "C" {
+#endif
+
+void send_voice_trigger();
+int init_voice_trigger_service();
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif