1 /*
   2  * Copyright (c) 2010, 2013, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.  Oracle designates this
   8  * particular file as subject to the "Classpath" exception as provided
   9  * by Oracle in the LICENSE file that accompanied this code.
  10  *
  11  * This code is distributed in the hope that it will be useful, but WITHOUT
  12  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  13  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  14  * version 2 for more details (a copy is included in the LICENSE file that
  15  * accompanied this code).
  16  *
  17  * You should have received a copy of the GNU General Public License version
  18  * 2 along with this work; if not, write to the Free Software Foundation,
  19  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  20  *
  21  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  22  * or visit www.oracle.com if you need additional information or have any
  23  * questions.
  24  */
  25 
  26 #ifdef HAVE_CONFIG_H
  27 #include <config.h>
  28 #endif
  29 
  30 #include <stdio.h>
  31 #include <fcntl.h>
  32 #include <unistd.h>
  33 #include <sys/types.h>
  34 #include <sys/stat.h>
  35 
  36 #include "audioconverter.h"
  37 
  38 GST_DEBUG_CATEGORY_STATIC (audioconverter_debug);
  39 #define GST_CAT_DEFAULT audioconverter_debug
  40 
  41 /*
  42  * The input capabilities.
  43  */
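/*
 * MPEG-1 audio (layers 1-3) is fully specified here; the MPEG-2/4 (AAC) entry
 * only pins mpegversion because the remaining details are discovered at
 * runtime from codec_data or the stream parser.
 */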
  44 #define AUDIOCONVERTER_SINK_CAPS \
  45 "audio/mpeg, " \
  46 "mpegversion = (int) 1, " \
  47 "layer = (int) [ 1, 3 ], " \
  48 "rate = (int) { 8000, 11025, 12000, 16000, 22050, 24000, 32000, 44100, 48000 }, " \
  49 "channels = (int) [ 1, 2 ]; " \
  50 "audio/mpeg, " \
  51 "mpegversion = (int) {2, 4}"
  52 
  53 static GstStaticPadTemplate sink_factory =
  54 GST_STATIC_PAD_TEMPLATE ("sink",
  55                          GST_PAD_SINK,
  56                          GST_PAD_ALWAYS,
  57                          GST_STATIC_CAPS (AUDIOCONVERTER_SINK_CAPS));
  58 
  59 /*
  60  * The output capabilities.
  61  */
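/*
 * The element always outputs interleaved 32-bit little-endian float PCM,
 * matching the format configured in initAudioFormatPCM().
 */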
  62 #define AUDIOCONVERTER_SRC_CAPS \
  63 "audio/x-raw-float, " \
  64 "endianness = (int) " G_STRINGIFY (G_LITTLE_ENDIAN) ", " \
  65 "signed = (boolean) true, " \
  66 "width = (int) 32, " \
  67 "depth = (int) 32, " \
  68 "rate = (int) { 8000, 11025, 12000, 16000, 22050, 24000, 32000, 44100, 48000 }, " \
  69 "channels = (int) [ 1, 2 ]"
  70 
  71 static GstStaticPadTemplate src_factory =
  72 GST_STATIC_PAD_TEMPLATE ("src",
  73                          GST_PAD_SRC,
  74                          GST_PAD_ALWAYS,
  75                          GST_STATIC_CAPS (AUDIOCONVERTER_SRC_CAPS));
  76 
/***********************************************************************************
 * Substitution for
 * GST_BOILERPLATE (AudioConverter, audioconverter, GstElement, GST_TYPE_ELEMENT);
 ***********************************************************************************/
  81 static void audioconverter_base_init  (gpointer g_class);
  82 static void audioconverter_class_init (AudioConverterClass *g_class);
  83 static void audioconverter_init (AudioConverter *object, AudioConverterClass *g_class);
  84 
  85 static GstElementClass *parent_class = NULL;
  86 
  87 static void audioconverter_class_init_trampoline (gpointer g_class, gpointer data)
  88 {
  89     parent_class = (GstElementClass *)  g_type_class_peek_parent (g_class);
  90     audioconverter_class_init ((AudioConverterClass *)g_class);
  91 }
  92 
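/*
 * Thread-safe, one-shot type registration: g_once_init_enter()/g_once_init_leave()
 * ensure gst_type_register_static_full() runs exactly once.
 */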
  93 GType audioconverter_get_type (void)
  94 {
  95     static volatile gsize gonce_data = 0;
  96     // INLINE - g_once_init_enter()
  97     if (g_once_init_enter (&gonce_data))
  98     {
  99         GType _type;
 100         _type = gst_type_register_static_full (GST_TYPE_ELEMENT,
 101                                                g_intern_static_string ("AudioConverter"),
 102                                                sizeof (AudioConverterClass),
 103                                                audioconverter_base_init,
 104                                                NULL,
 105                                                audioconverter_class_init_trampoline,
 106                                                NULL,
 107                                                NULL,
 108                                                sizeof (AudioConverter),
 109                                                0,
 110                                                (GInstanceInitFunc) audioconverter_init,
 111                                                NULL,
 112                                                (GTypeFlags) 0);
 113         g_once_init_leave (&gonce_data, (gsize) _type);
 114     }
 115     return (GType) gonce_data;
 116 }
 117 
 118 /*
 119  * Forward declarations.
 120  */
 121 static GstStateChangeReturn audioconverter_change_state (GstElement* element,
 122                                                          GstStateChange transition);
 123 static gboolean audioconverter_sink_event (GstPad * pad, GstEvent * event);
 124 static GstFlowReturn audioconverter_chain (GstPad * pad, GstBuffer * buf);
 125 static gboolean audioconverter_src_event (GstPad * pad, GstEvent * event);
 126 static gboolean audioconverter_src_query (GstPad * pad, GstQuery* query);
 127 static const GstQueryType * audioconverter_get_src_query_types (GstPad * pad);
 128 static void audioconverter_state_init(AudioConverter *decode);
 129 
 130 static void initAudioFormatPCM(Float64 sampleRate, AudioStreamBasicDescription* outputFormat);
 131 static void propertyListener(void *clientData,
 132                              AudioFileStreamID audioFileStream,
 133                              AudioFileStreamPropertyID propertyID,
 134                              UInt32 *flags);
 135 static void packetListener(void *clientData,
 136                            UInt32 numberBytes,
 137                            UInt32 numberPackets,
 138                            const void *inputData,
 139                            AudioStreamPacketDescription  *packetDescriptions);
 140 static OSStatus retrieveInputData(AudioConverterRef audioConverter,
 141                                   UInt32* numberDataPackets,
 142                                   AudioBufferList* bufferList,
 143                                   AudioStreamPacketDescription** dataPacketDescription,
 144                                   void* userData);
 145 
 146 /* --- GObject vmethod implementations --- */
 147 
 148 static void
 149 audioconverter_base_init (gpointer gclass)
 150 {
 151     GstElementClass *element_class;
 152 
 153     element_class = GST_ELEMENT_CLASS (gclass);
 154 
 155     gst_element_class_set_details_simple(element_class,
 156         "AudioConverter",
 157         "Codec/Decoder/Audio",
 158         "Decode raw MPEG audio stream to mono or stereo-interleaved PCM",
 159         "Oracle Corporation");
 160 
 161     gst_element_class_add_pad_template (element_class,
 162                                         gst_static_pad_template_get (&src_factory));
 163     gst_element_class_add_pad_template (element_class,
 164                                         gst_static_pad_template_get (&sink_factory));
 165 }
 166 
/*
 * Initialize the AudioConverter class.
 */
 170 static void
 171 audioconverter_class_init (AudioConverterClass * klass)
 172 {
 173     GstElementClass *gstelement_class = (GstElementClass *) klass;
 174 
 175     gstelement_class->change_state = audioconverter_change_state;
 176 }
 177 
/*
 * Initialize the new element.
 * Instantiate pads and add them to the element.
 * Set pad callback functions.
 * Initialize the instance structure.
 */
 184 static void
 185 audioconverter_init (AudioConverter * decode,
 186                      AudioConverterClass * gclass)
 187 {
 188     // Input.
 189     decode->sinkpad = gst_pad_new_from_static_template (&sink_factory, "sink");
 190     if (FALSE == gst_element_add_pad (GST_ELEMENT (decode), decode->sinkpad))
 191         g_warning ("audioconverter element failed to add sink pad!\n");
 192     gst_pad_set_chain_function (decode->sinkpad, GST_DEBUG_FUNCPTR(audioconverter_chain));
 193     gst_pad_set_event_function(decode->sinkpad, audioconverter_sink_event);
 194 
 195     // Output.
 196     decode->srcpad = gst_pad_new_from_static_template (&src_factory, "src");
 197     if (TRUE != gst_element_add_pad (GST_ELEMENT (decode), decode->srcpad))
 198         g_warning ("audioconverter element failed to add source pad!\n");
 199     gst_pad_set_event_function(decode->srcpad, audioconverter_src_event);
 200     gst_pad_set_query_function(decode->srcpad, audioconverter_src_query);
 201     gst_pad_set_query_type_function(decode->srcpad, audioconverter_get_src_query_types);
 202     gst_pad_use_fixed_caps (decode->srcpad);
 203 }
 204 
 205 /* --- GstElement vmethod implementations --- */
 206 
 207 /**
 208  * Initialize the AudioConverter structure. This should happen
 209  * only once, before decoding begins.
 210  */
 211 static void
 212 audioconverter_state_init(AudioConverter *decode)
 213 {
 214     decode->packetDesc = NULL;
 215     decode->inputData = NULL;
 216 
 217     decode->enable_parser = TRUE;
 218 
 219     decode->audioStreamID = NULL;
 220 
 221     decode->cookieSize = 0;
 222     decode->cookieData = NULL;
 223 
 224     decode->audioConverter = NULL;
 225     decode->outPacketDescription = NULL;
 226 
 227     decode->isAudioConverterReady = FALSE;
 228     decode->isFormatInitialized = FALSE;
 229     decode->hasAudioPacketTableInfo = FALSE;
 230 
 231     decode->audioDataPacketCount = 0;
 232     decode->previousDesc = NULL;
 233 
 234     // Flags
 235     decode->is_initialized = FALSE;
 236     decode->has_pad_caps = FALSE;
 237 
 238     // Counters
 239     decode->total_samples = 0;
 240 
 241     // Values
 242     decode->data_format = AUDIOCONVERTER_DATA_FORMAT_NONE;
 243     decode->initial_offset = (guint64)-1;
 244     decode->stream_length = AUDIOCONVERTER_STREAM_LENGTH_UNKNOWN;
 245     decode->duration = AUDIOCONVERTER_DURATION_UNKNOWN;
 246 }
 247 
 248 /**
 249  * Reset the state of the AudioConverter structure. This should happen before
 250  * decoding a new segment.
 251  */
 252 static void
 253 audioconverter_state_reset(AudioConverter *decode)
 254 {
 255     // Buffer cache
 256     if (NULL == decode->packetDesc) {
 257         decode->packetDesc = g_queue_new();
 258     } else if(!g_queue_is_empty(decode->packetDesc)) {
 259         guint queueLength = g_queue_get_length(decode->packetDesc);
 260         int i;
 261         for(i = 0; i < queueLength; i++) {
 262             gpointer p = g_queue_pop_head(decode->packetDesc);
 263             g_free(p);
 264         }
 265     }
 266 
 267     // Input data
 268     if (NULL == decode->inputData) {
 269         decode->inputData = g_array_sized_new(FALSE, FALSE, sizeof(guint8),
 270                                               AUDIOCONVERTER_INITIAL_BUFFER_SIZE);
 271     } else {
 272         decode->inputData = g_array_set_size(decode->inputData, 0);
 273     }
 274     decode->inputOffset = 0;
 275 
 276     // Decoder
 277     if (NULL != decode->audioConverter) {
 278         AudioConverterReset(decode->audioConverter);
 279     }
 280 
 281     // Flags
 282     decode->is_synced = FALSE;
 283     decode->is_discont = TRUE;
 284 
 285     // Counters
 286     decode->total_packets = 0;
 287 
 288     if(NULL != decode->previousDesc) {
 289         g_free(decode->previousDesc);
 290         decode->previousDesc = NULL;
 291     }
 292 }
 293 
 294 /*
 295  * Perform processing needed for state transitions.
 296  */
 297 static GstStateChangeReturn
 298 audioconverter_change_state (GstElement* element, GstStateChange transition)
 299 {
 300     AudioConverter *decode = AUDIOCONVERTER(element);
 301     GstStateChangeReturn ret;
 302 
 303     switch(transition)
 304     {
 305         case GST_STATE_CHANGE_NULL_TO_READY:
 306             audioconverter_state_init(decode);
 307             break;
 308         case GST_STATE_CHANGE_READY_TO_PAUSED:
 309             // Clear the AudioConverter state.
 310             audioconverter_state_reset(decode);
 311             break;
 312         case GST_STATE_CHANGE_PAUSED_TO_PLAYING:
 313             break;
 314         default:
 315             break;
 316     }
 317 
 318     // Change state.
 319     ret = parent_class->change_state(element, transition);
 320     if(GST_STATE_CHANGE_FAILURE == ret)
 321     {
 322         return ret;
 323     }
 324 
 325     switch(transition)
 326     {
 327         case GST_STATE_CHANGE_PLAYING_TO_PAUSED:
 328             break;
 329         case GST_STATE_CHANGE_PAUSED_TO_READY:
 330             // Free all allocated memory.
 331             if(!g_queue_is_empty(decode->packetDesc)) {
 332                 guint queueLength = g_queue_get_length(decode->packetDesc);
 333                 int i;
 334                 for(i = 0; i < queueLength; i++) {
 335                     gpointer p = g_queue_pop_head(decode->packetDesc);
 336                     g_free(p);
 337                 }
 338             }
 339 
 340             g_queue_free(decode->packetDesc);
 341             decode->packetDesc = NULL;
 342 
 343             g_array_free(decode->inputData, TRUE);
 344             decode->inputData = NULL;
 345 
 346             if(NULL != decode->audioStreamID) {
 347                 AudioFileStreamClose(decode->audioStreamID);
 348                 decode->audioStreamID = NULL;
 349             }
 350 
 351             if(NULL != decode->audioConverter) {
 352                 AudioConverterDispose(decode->audioConverter);
 353                 decode->audioConverter = NULL;
 354             }
 355 
 356             if(NULL != decode->cookieData) {
 357                 g_free(decode->cookieData);
 358                 decode->cookieData = NULL;
 359             }
 360 
 361             if(NULL != decode->outPacketDescription) {
 362                 g_free(decode->outPacketDescription);
 363                 decode->outPacketDescription = NULL;
 364             }
 365 
 366             if(NULL != decode->previousDesc) {
 367                 g_free(decode->previousDesc);
 368                 decode->previousDesc = NULL;
 369             }
 370             break;
 371         case GST_STATE_CHANGE_READY_TO_NULL:
 372             break;
 373         default:
 374             break;
 375     }
 376 
 377     return ret;
 378 }
 379 
/*
 * Process events received from upstream. The explicitly handled events are
 * FLUSH_START, FLUSH_STOP, and EOS; all events are forwarded downstream.
 */
 384 static gboolean
 385 audioconverter_sink_event (GstPad * pad, GstEvent * event)
 386 {
 387     gboolean ret;
 388     GstObject *parent = gst_object_get_parent((GstObject*)pad);
 389     AudioConverter *decode = AUDIOCONVERTER(parent);
 390 
 391 #if ENABLE_PRINT_SPEW
 392     g_print("sink event: %s\n", GST_EVENT_TYPE_NAME(event));
 393 #endif
 394     switch (GST_EVENT_TYPE (event))
 395     {
 396         case GST_EVENT_FLUSH_START:
 397         {
 398             // Start flushing buffers.
 399 
 400             // Set flag so chain function refuses new buffers.
 401             decode->is_flushing = TRUE;
 402 
 403             // Push the event downstream.
 404             ret = gst_pad_push_event (decode->srcpad, event);
 405             break;
 406         }
 407 
 408         case GST_EVENT_FLUSH_STOP:
 409         {
 410             // Stop flushing buffers.
 411             audioconverter_state_reset(decode);
 412 
 413             // Unset flag so chain function accepts buffers.
 414             decode->is_flushing = FALSE;
 415 
 416             // Push the event downstream.
 417             ret = gst_pad_push_event (decode->srcpad, event);
 418             break;
 419         }
 420             
 421         case GST_EVENT_EOS:
 422         {
 423             if (decode->is_priming)
 424             {
                gst_element_message_full(GST_ELEMENT(decode), GST_MESSAGE_ERROR, GST_STREAM_ERROR, GST_STREAM_ERROR_DECODE, g_strdup("MP3 file must contain at least 3 MP3 frames."), NULL, ("audioconverter.c"), ("audioconverter_sink_event"), 0);
 426             }
 427             
 428             // Push the event downstream.
 429             ret = gst_pad_push_event (decode->srcpad, event);
 430             break;
 431         }
 432 
 433         default:
 434             // Push the event downstream.
 435             ret = gst_pad_push_event (decode->srcpad, event);
 436             break;
 437     }
 438 
    // Unref the parent object.
 440     gst_object_unref(parent);
 441 
 442     return ret;
 443 }
 444 
/*
 * Process events received from downstream. The only event handled is SEEK, and
 * only to convert it from TIME to BYTES format before pushing it upstream.
 */
 449 static gboolean
 450 audioconverter_src_event (GstPad * pad, GstEvent * event)
 451 {
 452     gboolean result = FALSE;
 453     GstObject *parent = gst_object_get_parent((GstObject*)pad);
 454     AudioConverter *decode = AUDIOCONVERTER(parent);
 455 
 456     if (GST_EVENT_TYPE(event) == GST_EVENT_SEEK)
 457     {
 458         gdouble rate;           // segment rate
 459         GstFormat format;       // format of the seek values
 460         GstSeekFlags flags;     // the seek flags
 461         GstSeekType start_type; // the seek type of the start position
 462         GstSeekType stop_type;  // the seek type of the stop position
 463         gint64 start;           // the seek start position in the given format
 464         gint64 stop;            // the seek stop position in the given format
 465 
 466         // Get seek description from the event.
 467         gst_event_parse_seek (event, &rate, &format, &flags, &start_type, &start, &stop_type, &stop);
 468         if (format == GST_FORMAT_TIME)
 469         {
 470             gint64 start_byte = 0;
 471             GstFormat format = GST_FORMAT_BYTES;
 472             if (gst_pad_query_peer_convert(decode->sinkpad, GST_FORMAT_TIME, start, &format, &start_byte))
 473             {
 474                 result = gst_pad_push_event(decode->sinkpad,
 475                                             gst_event_new_seek(rate, GST_FORMAT_BYTES,
 476                                                                (GstSeekFlags)(GST_SEEK_FLAG_FLUSH | GST_SEEK_FLAG_ACCURATE),
 477                                                                GST_SEEK_TYPE_SET, start_byte,
 478                                                                GST_SEEK_TYPE_NONE, 0));
 479                 if (result)
 480                 {
 481                     // INLINE - gst_event_unref()
 482                     gst_event_unref (event);
 483                 }
 484             }
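            // If upstream could not convert the TIME target to BYTES, fall back
            // to the parser: estimate the packet index from the frame duration
            // and let AudioFileStreamSeek() supply the byte offset.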
 485             if (!result) {
 486                 SInt64 absolutePacketOffset = start / decode->frame_duration;
 487                 SInt64 absoluteByteOffset;
 488                 UInt32 flags = 0;
 489                 if(noErr == AudioFileStreamSeek(decode->audioStreamID, absolutePacketOffset,
 490                                                 &absoluteByteOffset, &flags)) {
 491                     start_byte = (gint64)absoluteByteOffset;
 492                     result = gst_pad_push_event(decode->sinkpad,
 493                                                 gst_event_new_seek(rate, GST_FORMAT_BYTES,
 494                                                                    (GstSeekFlags)(GST_SEEK_FLAG_FLUSH | GST_SEEK_FLAG_ACCURATE),
 495                                                                    GST_SEEK_TYPE_SET, start_byte,
 496                                                                    GST_SEEK_TYPE_NONE, 0));
 497                     if (result)
 498                     {
 499                         // INLINE - gst_event_unref()
 500                         gst_event_unref (event);
 501                     }
 502                 }
 503             }
 504         }
 505     }
 506 
 507     // Push the event upstream only if it was not processed.
 508     if (!result)
 509         result = gst_pad_push_event(decode->sinkpad, event);
 510 
    // Unref the parent object.
 512     gst_object_unref(parent);
 513 
 514     return result;
 515 }
 516 
 517 static const GstQueryType *
 518 audioconverter_get_src_query_types (GstPad * pad)
 519 {
 520     static const GstQueryType audioconverter_src_query_types[] = {
 521         GST_QUERY_POSITION,
 522         GST_QUERY_DURATION,
 523         0
 524     };
 525 
 526     return audioconverter_src_query_types;
 527 }
 528 
 529 static gboolean
 530 audioconverter_src_query (GstPad * pad, GstQuery * query)
 531 {
 532     // Set flag indicating that the query has not been handled.
 533     gboolean result = FALSE;
 534     GstObject *parent = gst_object_get_parent((GstObject*)pad);
 535     AudioConverter *decode = AUDIOCONVERTER(parent);
 536     GstFormat format;
 537     gint64 value;
 538 
 539     switch (GST_QUERY_TYPE (query))
 540     {
 541         case GST_QUERY_DURATION:
 542         {
 543 #if ENABLE_PRINT_SPEW
 544             g_print("Duration query\n");
 545 #endif
 546 
 547             // Do not handle query if the stream offset is unknown.
 548             if ((guint64)-1 == decode->initial_offset) {
 549                 // Unref the parent object.
 550                 gst_object_unref(parent);
 551                 return FALSE;
 552             }
 553 
 554             // Get the format required by the query.
 555             gst_query_parse_duration(query, &format, NULL);
 556 
            // Handle time-valued query.
 558             if (format == GST_FORMAT_TIME) {
 559                 if(AUDIOCONVERTER_DURATION_UNKNOWN != decode->duration) {
 560 #if ENABLE_PRINT_SPEW
 561                     g_print("STORED DURATION\n");
 562 #endif
 563                     gst_query_set_duration(query, GST_FORMAT_TIME, decode->duration);
 564                     result = TRUE;
 565                 } else if (gst_pad_query_peer_duration(decode->sinkpad, &format, &value) &&
 566                            format == GST_FORMAT_TIME) {
 567                     // Get the duration from the sinkpad.
 568                     gst_query_set_duration(query, GST_FORMAT_TIME, value);
 569                     decode->duration = value;
 570                     result = TRUE;
 571 #if ENABLE_PRINT_SPEW
 572                     g_print("SINK PAD TIME DURATION\n");
 573 #endif
 574                 } else {
 575                     GstFormat fmt = GST_FORMAT_BYTES;
 576                     gint64 data_length;
 577                     if (gst_pad_query_peer_duration(decode->sinkpad, &fmt, &data_length)) {
 578                         data_length -= decode->initial_offset;
 579 
 580                         fmt = GST_FORMAT_TIME;
 581                         if (gst_pad_query_peer_convert(decode->sinkpad, GST_FORMAT_BYTES, data_length, &fmt, &value)) {
 582 #if ENABLE_PRINT_SPEW
 583                             g_print("SINK PAD BYTE DURATION\n");
 584 #endif
 585                             gst_query_set_duration(query, GST_FORMAT_TIME, value);
 586                             decode->duration = value;
 587                             result = TRUE;
 588                         }
 589                     }
 590                 }
 591             }
 592             break;
 593         }
 594 
 595         case GST_QUERY_POSITION:
 596         {
 597             // Get the format required by the query.
 598             gst_query_parse_position(query, &format, NULL);
 599 
 600             // Handle time-valued query if the decoder is initialized.
 601             if(format == GST_FORMAT_TIME && decode->is_initialized)
 602             {
 603                 // Use the sampling rate to convert sample offset to time.
 604                 value = gst_util_uint64_scale_int(decode->total_samples,
 605                                                   GST_SECOND,
 606                                                   decode->sampling_rate);
 607 
 608                 // Set the position on the query object.
 609                 gst_query_set_position(query, format, value);
 610 
 611                 // Set flag indicating that the query has been handled.
 612                 result = TRUE;
 613             }
            break;
        }
 615 
 616         default:
 617             break;
 618     }
 619 
 620     // Use default query if flag indicates query not handled.
 621     if(result == FALSE)
 622     {
 623         result = gst_pad_query_default(pad, query);
 624     }
 625 
 626     // Unref the parent object.
 627     gst_object_unref(parent);
 628 
 629     return result;
 630 }
 631 
 632 /*
 633  * Processes a buffer of MPEG audio data pushed to the sink pad.
 634  */
 635 static GstFlowReturn
 636 audioconverter_chain (GstPad * pad, GstBuffer * buf)
 637 {
 638     AudioConverter *decode = AUDIOCONVERTER(GST_OBJECT_PARENT(pad));
 639     GstFlowReturn ret      = GST_FLOW_OK;
    guint8 *buf_data       = GST_BUFFER_DATA(buf);
    guint buf_size         = GST_BUFFER_SIZE(buf);
    GstClockTime buf_time  = GST_BUFFER_TIMESTAMP(buf);
    // Capture the buffer offset up front; the buffer is unreffed before the
    // offset is needed further below.
    gboolean buf_has_offset = GST_BUFFER_OFFSET_IS_VALID(buf);
    guint64 buf_offset      = GST_BUFFER_OFFSET(buf);
 643 
 644     // If between FLUSH_START and FLUSH_STOP, reject new buffers.
 645     if (decode->is_flushing)
 646     {
 647         // Unref the input buffer.
 648         // INLINE - gst_buffer_unref()
 649         gst_buffer_unref(buf);
 650 
 651         return GST_FLOW_WRONG_STATE;
 652     }
 653 
 654     // Reset state on discont buffer if not after FLUSH_STOP.
 655     if (GST_BUFFER_IS_DISCONT(buf) && TRUE == decode->is_synced) {
 656         audioconverter_state_reset(decode);
 657     }
 658 
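    // On the first buffer, either configure the decoder directly from AAC
    // codec_data carried in the sink caps, or open an AudioFileStream parser
    // (with a file-type hint derived from the caps) to discover the format.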
 659     if (decode->enable_parser && NULL == decode->audioStreamID) {
 660         AudioFileTypeID audioStreamTypeHint = kAudioFileM4AType;
 661 
 662         // Try to set a better parser hint from the sink pad caps.
 663         GstCaps* sink_peer_caps = gst_pad_peer_get_caps(decode->sinkpad);
 664         if(NULL != sink_peer_caps) {
 665             if(gst_caps_get_size(sink_peer_caps) > 0) {
 666                 GstStructure* caps_struct = gst_caps_get_structure(sink_peer_caps, 0);
 667                 if(NULL != caps_struct) {
 668                     const gchar* struct_name = gst_structure_get_name(caps_struct);
 669                     if(NULL != struct_name) {
 670                         if(0 == strcmp(struct_name, "audio/mpeg")) {
 671                             gint mpegversion;
 672                             if(!gst_structure_get_int(caps_struct, "mpegversion", &mpegversion)) {
 673                                 mpegversion = 1;
 674                             }
 675 
 676                             if(4 == mpegversion &&
 677                                NULL != gst_structure_get_value (caps_struct, "codec_data")) {
 678                                 decode->enable_parser = FALSE;
 679                                 decode->data_format = AUDIOCONVERTER_DATA_FORMAT_AAC;
 680 
 681                                 const GValue* codec_data_value = gst_structure_get_value (caps_struct, "codec_data");
 682                                 GstBuffer* codec_data_buf = gst_value_get_buffer (codec_data_value);
 683                                 guint8* codec_data = GST_BUFFER_DATA(codec_data_buf);
 684                                 guint codec_data_size = GST_BUFFER_SIZE(codec_data_buf);
 685 
 686                                 //
 687                                 // Get the number of channels from the Audio Specific Config
 688                                 // which is what is passed in "codec_data"
 689                                 //
 690                                 // Ref: http://wiki.multimedia.cx/index.php?title=MPEG-4_Audio
 691                                 //
 692                                 guint8 channel_config = 0;
 693                                 if (codec_data_size >= 2) {
 694                                     guint8 freq_index = (codec_data[0]&0x07) << 1 | (codec_data[1]&0x80) >> 7;
 695                                     if (15 == freq_index) {
 696                                         if(codec_data_size >= 5) {
 697                                             channel_config = (codec_data[4]&0x78) >> 3;
 698                                         }
 699                                     } else {
 700                                         channel_config = (codec_data[1]&0x78) >> 3;
 701                                     }
 702                                 }
 703 
 704                                 const GValue* esds_value = gst_structure_get_value (caps_struct, "esds_data");
 705                                 if(esds_value) {
 706                                     gint rate;
 707                                     if(!gst_structure_get_int(caps_struct, "rate", &rate)) {
 708                                         rate = 44100;
 709                                     }
 710 
 711                                     gint channels;
 712                                     if(!gst_structure_get_int(caps_struct, "channels", &channels)) {
 713                                         channels = 2;
 714                                     }
 715 
 716                                     GstBuffer* esds_buf = gst_value_get_buffer (esds_value);
 717                                     guint8* esds_data = GST_BUFFER_DATA(esds_buf);
 718                                     guint esds_size = GST_BUFFER_SIZE(esds_buf);
 719 
 720                                     decode->sampling_rate = rate;
 721                                     if (channel_config > 0 && channel_config < 7) {
 722                                         decode->num_channels = channel_config;
 723                                     } else if (7 == channel_config) {
 724                                         decode->num_channels = 8;
 725                                     } else {
 726                                         decode->num_channels = channels;
 727                                     }
 728                                     decode->samples_per_frame = 1024; // XXX Note: AAC-LC has 960 spf
 729 
 730                                     decode->audioInputFormat.mSampleRate = decode->sampling_rate;
 731                                     decode->audioInputFormat.mFormatID = kAudioFormatMPEG4AAC;
 732                                     decode->audioInputFormat.mFramesPerPacket = decode->samples_per_frame;
 733                                     decode->audioInputFormat.mChannelsPerFrame = decode->num_channels;
 734 
 735                                     initAudioFormatPCM(decode->audioInputFormat.mSampleRate,
 736                                                        &decode->audioOutputFormat);
 737 
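                                    // The esds payload after its header is kept
                                    // as the decompression magic cookie handed
                                    // to the AudioConverter at creation time.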
 738                                     decode->cookieSize = esds_size - AUDIOCONVERTER_AAC_ESDS_HEADER_SIZE;
 739                                     decode->cookieData = g_malloc0(decode->cookieSize);
 740                                     if(NULL != decode->cookieData) {
 741                                         memcpy(decode->cookieData,
 742                                                esds_data + AUDIOCONVERTER_AAC_ESDS_HEADER_SIZE,
 743                                                decode->cookieSize);
 744                                     }
 745 
 746                                     decode->isFormatInitialized = TRUE;
 747                                     decode->isAudioConverterReady = TRUE;
 748                                 } else {
 749                                     // Unref the input buffer.
 750                                     // INLINE - gst_buffer_unref()
 751                                     gst_buffer_unref(buf);
 752 
 753                                     gst_caps_unref(sink_peer_caps);
 754 
 755                                     return GST_FLOW_ERROR;
 756                                 }
 757                             } else {
 758                                 gint layer;
 759                                 if(gst_structure_get_int(caps_struct, "layer", &layer)) {
 760                                     switch(layer) {
 761                                         case 1:
 762                                             audioStreamTypeHint = kAudioFileMP1Type;
 763                                             break;
 764                                         case 2:
 765                                             audioStreamTypeHint = kAudioFileMP2Type;
 766                                             break;
 767                                         case 3:
 768                                         default:
 769                                             audioStreamTypeHint = kAudioFileMP3Type;
 770                                             break;
 771                                     }
 772                                 } else {
 773                                     audioStreamTypeHint = kAudioFileM4AType;
 774                                 }
 775                             }
 776                         }
 777                     }
 778                 }
 779             }
 780             gst_caps_unref(sink_peer_caps);
 781         }
 782 
 783         if(decode->enable_parser) {
 784             if(noErr != AudioFileStreamOpen((void*)decode,
 785                                             propertyListener,
 786                                             packetListener,
 787                                             audioStreamTypeHint,
 788                                             &decode->audioStreamID)) {
 789                 // Unref the input buffer.
 790                 // INLINE - gst_buffer_unref()
 791                 gst_buffer_unref(buf);
 792 
 793 #if ENABLE_PRINT_SPEW
 794                 g_print("AudioFileStreamOpen failed\n");
 795 #endif
 796                 return GST_FLOW_ERROR;
 797             }
 798         }
 799     }
 800 
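    // Parser path: feed the raw bytes to AudioFileStreamParseBytes(), which in
    // turn invokes propertyListener() and packetListener(). In the non-parser
    // (AAC with codec_data) path the whole buffer is enqueued as one packet.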
 801     if(decode->enable_parser) {
 802         guint32 parserFlags;
 803         if(!decode->isAudioConverterReady) {
 804             parserFlags = 0;
 805         } else {
 806             //parserFlags = decode->is_synced ? 0 : kAudioFileStreamParseFlag_Discontinuity;
 807             if(decode->is_synced) {
 808                 parserFlags = 0;
 809             } else {
 810                 parserFlags = kAudioFileStreamParseFlag_Discontinuity;
 811                 AudioConverterReset(decode->audioConverter);
 812             }
 813         }
 814 
 815         OSStatus result = AudioFileStreamParseBytes(decode->audioStreamID, buf_size, buf_data, parserFlags);
 816 
 817         // Unref the input buffer.
 818         // INLINE - gst_buffer_unref()
 819         gst_buffer_unref(buf);
 820 
 821         if(noErr != result) {
 822 #if ENABLE_PRINT_SPEW
 823             g_print("AudioFileStreamParseBytes %d\n", result);
 824 #endif
 825             return GST_FLOW_ERROR;
 826         }
 827     } else {
 828         if(!decode->is_synced && NULL != decode->audioConverter) {
 829             AudioConverterReset(decode->audioConverter);
 830         }
 831 
 832         AudioStreamPacketDescription packetDescriptions;
 833         packetDescriptions.mDataByteSize = buf_size;
 834         packetDescriptions.mStartOffset = 0;
 835         packetDescriptions.mVariableFramesInPacket = 0;
 836 
 837         packetListener((void*)decode, buf_size, 1, (const void*)buf_data,
 838                        &packetDescriptions);
 839 
 840         // Unref the input buffer.
 841         // INLINE - gst_buffer_unref()
 842         gst_buffer_unref(buf);
 843     }
 844 
 845     // Return without pushing a buffer if format not derived from stream parser.
 846     if(!decode->isFormatInitialized) {
 847         return GST_FLOW_OK;
 848     }
 849 
 850     // Return without pushing a buffer if format is MPEG audio but no packets are enqueued.
 851     if(AUDIOCONVERTER_DATA_FORMAT_MPA == decode->data_format && 0 == decode->total_packets) {
 852         return GST_FLOW_OK;
 853     }
 854 
 855     if(decode->is_synced == FALSE) {
 856         // Set flags.
 857         gboolean is_first_frame = !decode->is_initialized;
 858         decode->is_initialized = TRUE;
 859         decode->is_synced = TRUE;
 860         decode->is_priming = TRUE;
 861 
 862         // Save frame description.
 863         decode->sampling_rate = (guint)decode->audioInputFormat.mSampleRate;
 864         decode->samples_per_frame = decode->audioInputFormat.mFramesPerPacket;
 865         decode->frame_duration = (guint)(GST_SECOND*
 866                                          (double)decode->samples_per_frame/
 867                                          (double)decode->sampling_rate);
 868 
 869         if(is_first_frame) {
 870             // Allocate memory for output packet descriptions.
 871             decode->outPacketDescription = g_malloc(decode->samples_per_frame*sizeof(AudioStreamPacketDescription));
 872             if(NULL == decode->outPacketDescription) {
 873                 return GST_FLOW_ERROR;
 874             }
 875 
            // Save the first frame offset captured before the input buffer was unreffed.
            if (buf_has_offset) {
                decode->initial_offset = buf_offset;
            } else {
                decode->initial_offset = 0;
            }
 882 
 883             // Query for the stream length if it was not set from a header.
 884             if (AUDIOCONVERTER_STREAM_LENGTH_UNKNOWN == decode->stream_length)
 885             {
 886                 GstFormat sink_format = GST_FORMAT_BYTES;
 887                 gint64 sink_length;
 888 
 889                 if (gst_pad_query_peer_duration(decode->sinkpad, &sink_format, &sink_length))
 890                 {
 891                     decode->stream_length = sink_length;
 892                 }
 893             }
 894         }
 895 
 896         // Derive sample count using the timestamp.
 897         guint64 frame_index = buf_time/decode->frame_duration;
 898         decode->total_samples = frame_index * decode->samples_per_frame;
 899 
 900 
 901         // Set the sink and source pad caps if not already done.
 902         if (TRUE != decode->has_pad_caps)
 903         {
 904             GstCaps* caps = NULL;
 905 
 906             if(AUDIOCONVERTER_DATA_FORMAT_MPA == decode->data_format) {
 907                 // Determine the layer.
 908                 gint layer;
 909                 switch(decode->audioInputFormat.mFormatID) {
 910                     case kAudioFormatMPEGLayer1:
 911                         layer = 1;
 912                         break;
 913                     case kAudioFormatMPEGLayer2:
 914                         layer = 2;
 915                         break;
 916                     case kAudioFormatMPEGLayer3:
 917                         layer = 3;
 918                         break;
 919                     default:
 920                         layer = 3;
 921                         break;
 922                 }
 923 
 924                 // Sink caps: MPEG audio.
 925                 caps = gst_caps_new_simple ("audio/mpeg",
                                            "mpegversion", G_TYPE_INT, 1,
 927                                             "layer", G_TYPE_INT, layer,
 928                                             "rate", G_TYPE_INT, (gint)decode->sampling_rate,
 929                                             "channels", G_TYPE_INT, (gint)decode->num_channels,
 930                                             NULL);
 931             } else if(AUDIOCONVERTER_DATA_FORMAT_AAC == decode->data_format) {
 932                 caps = gst_caps_new_simple ("audio/mpeg",
 933                                             "mpegversion", G_TYPE_INT, 2,
 934                                              NULL);
 935             } else {
 936                 return GST_FLOW_ERROR;
 937             }
 938 
 939             if(gst_pad_set_caps (decode->sinkpad, caps) == FALSE)
 940             {
 941 #if ENABLE_PRINT_SPEW
 942                 g_print("WARNING: COULD NOT SET sinkpad CAPS\n");
 943 #endif
 944             }
 945 #if ENABLE_PRINT_SPEW
 946             g_print("sink_caps %s\n", gst_caps_to_string(caps));
 947 #endif
 948 
 949             gst_caps_unref (caps);
 950             caps = NULL;
 951 
 952             // Source caps: PCM audio.
 953 
 954             // Create the source caps.
 955             caps = gst_caps_new_simple ("audio/x-raw-float",
 956                                         "rate", G_TYPE_INT, (gint)decode->sampling_rate,
 957                                         "channels", G_TYPE_INT,
 958                                         decode->audioOutputFormat.mChannelsPerFrame, // may not equal num_channels
 959                                         "endianness", G_TYPE_INT, G_LITTLE_ENDIAN,
 960                                         "width", G_TYPE_INT, 32,
 961                                         "depth", G_TYPE_INT, 32,
 962                                         "signed", G_TYPE_BOOLEAN, TRUE,
 963                                         NULL);
 964 
 965             // Set the source caps.
 966             if(gst_pad_set_caps (decode->srcpad, caps) == FALSE)
 967             {
 968 #if ENABLE_PRINT_SPEW
 969                 g_print("WARNING: COULD NOT SET srcpad CAPS\n");
 970 #endif
 971             }
 972 #if ENABLE_PRINT_SPEW
 973             g_print("src_caps %s\n", gst_caps_to_string(caps));
 974 #endif
 975 
 976             gst_caps_unref (caps);
 977             caps = NULL;
 978 
 979             // Set the source caps flag.
 980             decode->has_pad_caps = TRUE;
 981         }
 982     }
 983 
 984     if(!decode->isAudioConverterReady) {
 985         // Return without pushing a buffer if converter is not ready.
 986         return GST_FLOW_OK;
 987     } else if(NULL == decode->audioConverter) {
 988         // Initialize the converter.
 989         if(noErr != AudioConverterNew(&decode->audioInputFormat,
 990                                       &decode->audioOutputFormat,
 991                                       &decode->audioConverter)) {
 992 #if ENABLE_PRINT_SPEW
 993             g_print("Failed to initialize AudioConverter\n");
 994 #endif
 995             // Return an error if converter cannot be initialized.
 996             return GST_FLOW_ERROR;
 997         } else if(NULL != decode->cookieData && noErr != AudioConverterSetProperty(decode->audioConverter,
 998                                                                             kAudioConverterDecompressionMagicCookie,
 999                                                                             decode->cookieSize, decode->cookieData)) {
1000 #if ENABLE_PRINT_SPEW
1001             g_print("Failed to set AudioConverter magic cookie data\n");
1002 #endif
1003             // Return an error if converter cannot be initialized.
1004             return GST_FLOW_ERROR;
1005         } else if(AUDIOCONVERTER_DATA_FORMAT_AAC == decode->data_format) {
1006             AudioConverterPrimeInfo primeInfo;
1007             primeInfo.leadingFrames = 0;
1008             primeInfo.trailingFrames = 0;
1009             AudioConverterSetProperty(decode->audioConverter, kAudioConverterPrimeInfo,
1010                                       sizeof(primeInfo),
1011                                       &primeInfo);
1012         }
1013     }
1014 
1015     // Decoder priming (MPEG audio only).
1016     if(decode->is_priming &&
1017        //AUDIOCONVERTER_DATA_FORMAT_MPA == decode->data_format &&
1018        decode->total_packets >= AUDIOCONVERTER_MPEG_MIN_PACKETS) {
1019         // Turn off priming if enough packets are enqueued.
1020         decode->is_priming = FALSE;
1021     }
1022 
1023     if(decode->is_priming) {
1024         // Return without pushing a buffer if there are not enough packets enqueued.
1025         if(g_queue_get_length(decode->packetDesc) < AUDIOCONVERTER_MPEG_MIN_PACKETS) {
1026             return GST_FLOW_OK;
1027         } else {
1028             decode->is_priming = FALSE;
1029         }
1030     }
1031 
1032     // Drain the packet queue.
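    // Each pass asks the converter for up to one frame's worth of PCM; the
    // converter pulls compressed packets through retrieveInputData() below.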
1033     while(!g_queue_is_empty(decode->packetDesc)) {
1034         UInt32 outputDataPacketSize = decode->samples_per_frame;
1035 
1036         guint outbuf_size = outputDataPacketSize*decode->audioOutputFormat.mBytesPerPacket;
1037         GstBuffer *outbuf = NULL;
1038         ret = gst_pad_alloc_buffer_and_set_caps (decode->srcpad, GST_BUFFER_OFFSET_NONE,
1039                                                  outbuf_size,
1040                                                  GST_PAD_CAPS(decode->srcpad), &outbuf);
1041 
1042         // Bail out on error.
1043         if(ret != GST_FLOW_OK)
1044         {
1045             if (ret != GST_FLOW_WRONG_STATE)
1046             {
1047                 gst_element_message_full(GST_ELEMENT(decode), GST_MESSAGE_ERROR, GST_CORE_ERROR, GST_CORE_ERROR_SEEK, g_strdup("Decoded audio buffer allocation failed"), NULL, ("audioconverter.c"), ("audioconverter_chain"), 0);
1048             }
1049 
1050             return ret;
1051         }
1052 
1053         AudioBufferList outputData;
1054         outputData.mNumberBuffers = 1;
1055         outputData.mBuffers[0].mNumberChannels = decode->audioOutputFormat.mChannelsPerFrame;
1056         outputData.mBuffers[0].mDataByteSize = (UInt32)outputDataPacketSize*decode->audioOutputFormat.mBytesPerFrame;
1057         outputData.mBuffers[0].mData = GST_BUFFER_DATA(outbuf);
1058         OSStatus err = AudioConverterFillComplexBuffer(decode->audioConverter,
1059                                                        retrieveInputData,
1060                                                        (void*)decode,
1061                                                        &outputDataPacketSize,
1062                                                        &outputData,
1063                                                        decode->outPacketDescription);
1064         if(noErr != err) {
1065 #if ENABLE_PRINT_SPEW
            g_print("AudioConverterFillComplexBuffer err: %d\n", (int)err);
1067 #endif
1068             // INLINE - gst_buffer_unref()
1069             gst_buffer_unref(outbuf);
1070             return GST_FLOW_ERROR;
1071         }
1072 
1073         if(0 == outputDataPacketSize) {
1074             // INLINE - gst_buffer_unref()
1075             gst_buffer_unref(outbuf);
1076             break;
1077         }
1078 
1079         // Calculate the timestamp from the sample count and rate.
1080         guint64 timestamp = gst_util_uint64_scale_int(decode->total_samples,
1081                                                       GST_SECOND,
1082                                                       decode->sampling_rate);
1083 
1084         // Set output buffer properties.
1085         GST_BUFFER_TIMESTAMP(outbuf) = timestamp;
1086         GST_BUFFER_DURATION(outbuf) = decode->frame_duration;
1087         GST_BUFFER_SIZE(outbuf) = outputDataPacketSize*decode->audioOutputFormat.mBytesPerPacket;
1088         GST_BUFFER_OFFSET(outbuf) = decode->total_samples;
1089         GST_BUFFER_OFFSET_END(outbuf) = (decode->total_samples += outputDataPacketSize);
1090         if(decode->is_discont)
1091         {
1092             GST_BUFFER_FLAG_SET (outbuf, GST_BUFFER_FLAG_DISCONT);
1093             decode->is_discont = FALSE;
1094         }
1095 
1096         ret = gst_pad_push (decode->srcpad, outbuf);
1097         if(GST_FLOW_OK != ret) {
1098             return ret;
1099         }
1100     }
1101 
1102     // Remove processed bytes from the buffer cache.
1103     if(decode->inputOffset != 0)
1104     {
1105         decode->inputData = g_array_remove_range(decode->inputData, 0,
1106                                                  decode->inputOffset <= decode->inputData->len ?
1107                                                  decode->inputOffset : decode->inputData->len);
1108         decode->inputOffset = 0;
1109     }
1110 
1111     return GST_FLOW_OK;
1112 }
1113 
1114 #if ENABLE_PRINT_SPEW
1115 static void printStreamDesc (AudioStreamBasicDescription* d) {
1116     g_print ("%lf %d %d %d %d %d %d %d %d\n",
1117             d->mSampleRate,
1118             d->mFormatID,
1119             d->mFormatFlags,
1120             d->mBytesPerPacket,
1121             d->mFramesPerPacket,
1122             d->mBytesPerFrame,
1123             d->mChannelsPerFrame,
1124             d->mBitsPerChannel,
1125             d->mReserved);
1126 }
1127 #endif
1128 
1129 // AudioStream and AudioConverter functions
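/*
 * Describe the decoder output as packed, interleaved 32-bit float PCM with two
 * channels: one frame per packet and 8 bytes per frame.
 */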
1130 static void initAudioFormatPCM(Float64 sampleRate,
1131                                AudioStreamBasicDescription* outputFormat) {
1132     outputFormat->mSampleRate = sampleRate;
1133     outputFormat->mFormatID = kAudioFormatLinearPCM;
1134     outputFormat->mFormatFlags = kAudioFormatFlagIsFloat | kAudioFormatFlagIsPacked;
1135     outputFormat->mBytesPerPacket = 8;
1136     outputFormat->mFramesPerPacket = 1;
1137     outputFormat->mBytesPerFrame = 8;
1138     outputFormat->mChannelsPerFrame = 2;
1139     outputFormat->mBitsPerChannel = 32;
1140     outputFormat->mReserved = 0;
1141 }
1142 
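/*
 * Invoked by AudioFileStreamParseBytes() as stream-level properties are
 * discovered. The DataFormat and ReadyToProducePackets properties drive the
 * format initialization and the converter-ready flag.
 */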
1143 static void propertyListener(void *clientData,
1144                              AudioFileStreamID audioFileStream,
1145                              AudioFileStreamPropertyID propertyID,
1146                              UInt32 *flags) {
1147     AudioConverter* decode = (AudioConverter*)clientData;
1148     UInt32 propertyDataSize;
1149     UInt32 isReady;
1150     Boolean isCookieWritable;
1151 
1152     switch(propertyID) {
1153         case kAudioFileStreamProperty_ReadyToProducePackets:
1154 #if ENABLE_PRINT_SPEW
1155             g_print ("kAudioFileStreamProperty_ReadyToProducePackets\n");
1156 #endif
1157             propertyDataSize = sizeof(isReady);
1158             AudioFileStreamGetProperty(audioFileStream, propertyID,
1159                                        &propertyDataSize, &isReady);
1160             if(1 == isReady && TRUE == decode->isFormatInitialized) {
1161                 decode->isAudioConverterReady = TRUE;
1162                 if(decode->hasAudioPacketTableInfo) {
1163                     UInt64 numFrames = decode->packetTableInfo.mNumberValidFrames;
1164                     Float64 sampleRate = decode->audioInputFormat.mSampleRate;
1165                     decode->duration = (gint64)(numFrames/sampleRate*GST_SECOND + 0.5);
1166 #if ENABLE_PRINT_SPEW
                    g_print("duration: %" G_GINT64_FORMAT "\n", decode->duration);
1168 #endif
1169                 }
1170             }
1171             break;
1172         case kAudioFileStreamProperty_FileFormat:
1173 #if ENABLE_PRINT_SPEW
1174             g_print ("kAudioFileStreamProperty_FileFormat\n");
1175 #endif
1176             break;
1177         case kAudioFileStreamProperty_DataFormat:
1178 #if ENABLE_PRINT_SPEW
1179             g_print ("kAudioFileStreamProperty_DataFormat\n");
1180 #endif
1181             propertyDataSize = sizeof(decode->audioInputFormat);
1182             AudioFileStreamGetProperty(audioFileStream, propertyID,
1183                                        &propertyDataSize, &decode->audioInputFormat);
1184 #if ENABLE_PRINT_SPEW
1185             printStreamDesc(&decode->audioInputFormat);
1186 #endif
1187             switch(decode->audioInputFormat.mFormatID) {
1188                 case kAudioFormatMPEGLayer1:
1189                 case kAudioFormatMPEGLayer2:
1190                 case kAudioFormatMPEGLayer3:
1191                     decode->data_format = AUDIOCONVERTER_DATA_FORMAT_MPA;
1192                     break;
1193                 case kAudioFormatMPEG4AAC:
1194                     decode->data_format = AUDIOCONVERTER_DATA_FORMAT_AAC;
1195                     break;
1196             }
1197             decode->sampling_rate = decode->audioInputFormat.mSampleRate;
1198             decode->samples_per_frame = decode->audioInputFormat.mFramesPerPacket;
1199             decode->num_channels = decode->audioInputFormat.mChannelsPerFrame;
1200             initAudioFormatPCM(decode->audioInputFormat.mSampleRate, &decode->audioOutputFormat);
1201             decode->isFormatInitialized = TRUE;
1202             break;
1203         case kAudioFileStreamProperty_FormatList:
1204 #if ENABLE_PRINT_SPEW
1205             g_print ("kAudioFileStreamProperty_FormatList\n");
1206 #endif
1207             break;
1208         case kAudioFileStreamProperty_MagicCookieData:
1209 #if ENABLE_PRINT_SPEW
1210             g_print ("kAudioFileStreamProperty_MagicCookieData\n");
1211 #endif
1212             if(AudioFileStreamGetPropertyInfo(audioFileStream, kAudioFileStreamProperty_MagicCookieData,
1213                                               &decode->cookieSize, &isCookieWritable)) {
1214                 decode->cookieSize = 0;
1215             }
1216 
1217             if(decode->cookieSize > 0) {
1218                 decode->cookieData = g_malloc0(decode->cookieSize);
1219                 if(NULL != decode->cookieData) {
                    if(AudioFileStreamGetProperty(audioFileStream, kAudioFileStreamProperty_MagicCookieData,
                                                  &decode->cookieSize, decode->cookieData)) {
                        // Free the allocation on failure rather than leaking it.
                        g_free(decode->cookieData);
                        decode->cookieData = NULL;
                        decode->cookieSize = 0;
                    }
1224                 }
1225             }
1226             break;
1227         case kAudioFileStreamProperty_AudioDataByteCount:
1228 #if ENABLE_PRINT_SPEW
1229             g_print ("kAudioFileStreamProperty_AudioDataByteCount\n");
1230 #endif
1231             break;
1232         case kAudioFileStreamProperty_AudioDataPacketCount:
1233 #if ENABLE_PRINT_SPEW
1234             g_print ("kAudioFileStreamProperty_AudioDataPacketCount\n");
1235 #endif
            propertyDataSize = sizeof(decode->audioDataPacketCount);
1237             AudioFileStreamGetProperty(audioFileStream, propertyID,
1238                                        &propertyDataSize, &decode->audioDataPacketCount);
1239 #if ENABLE_PRINT_SPEW
1240             g_print (">>> audioDataPacketCount: %llu\n", decode->audioDataPacketCount);
1241 #endif
1242             break;
1243         case kAudioFileStreamProperty_MaximumPacketSize:
1244 #if ENABLE_PRINT_SPEW
1245             g_print ("kAudioFileStreamProperty_MaximumPacketSize\n");
1246 #endif
1247             break;
1248         case kAudioFileStreamProperty_DataOffset:
1249 #if ENABLE_PRINT_SPEW
1250             g_print ("kAudioFileStreamProperty_DataOffset\n");
1251 #endif
1252             break;
1253         case kAudioFileStreamProperty_ChannelLayout:
1254 #if ENABLE_PRINT_SPEW
1255             g_print ("kAudioFileStreamProperty_ChannelLayout\n");
1256 #endif
1257             break;
1258         case kAudioFileStreamProperty_PacketTableInfo:
1259 #if ENABLE_PRINT_SPEW
1260             g_print ("kAudioFileStreamProperty_PacketTableInfo\n");
1261 #endif
1262             propertyDataSize = sizeof(AudioFilePacketTableInfo);
1263             if(noErr == AudioFileStreamGetProperty(audioFileStream, propertyID,
1264                                                    &propertyDataSize, &decode->packetTableInfo)) {
1265                 decode->hasAudioPacketTableInfo = TRUE;
1266             }
1267 #if ENABLE_PRINT_SPEW
1268             g_print("valid frames %d priming frames %d remainder frames %d\n",
1269                     (int)decode->packetTableInfo.mNumberValidFrames,
1270                     decode->packetTableInfo.mPrimingFrames,
1271                     decode->packetTableInfo.mRemainderFrames);
1272 #endif
1273             break;
1274         case kAudioFileStreamProperty_PacketSizeUpperBound:
1275 #if ENABLE_PRINT_SPEW
1276             g_print ("kAudioFileStreamProperty_PacketSizeUpperBound\n");
1277 #endif
1278             break;
1279         case kAudioFileStreamProperty_AverageBytesPerPacket:
1280 #if ENABLE_PRINT_SPEW
1281             g_print ("kAudioFileStreamProperty_AverageBytesPerPacket\n");
1282 #endif
1283             break;
1284         case kAudioFileStreamProperty_BitRate:
1285 #if ENABLE_PRINT_SPEW
1286             g_print ("kAudioFileStreamProperty_BitRate\n");
1287 #endif
1288             break;
1289         default:
1290 #if ENABLE_PRINT_SPEW
1291             g_print("propertyID: %d\n", propertyID);
1292 #endif
1293             break;
1294     }
1295 }
1296 
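/*
 * Invoked by AudioFileStreamParseBytes() for each run of parsed audio packets.
 * The packet payloads are appended to the contiguous inputData array and a
 * copy of each packet description is queued for retrieveInputData().
 */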
1297 static void packetListener(void *clientData,
1298                            UInt32 numberBytes,
1299                            UInt32 numberPackets,
1300                            const void *inputData,
1301                            AudioStreamPacketDescription  *packetDescriptions) {
1302     AudioConverter* decode = (AudioConverter*)clientData;
1303 
1304     int i;
1305     for(i = 0; i < numberPackets; i++) {
1306         decode->total_packets++;
1307         decode->inputData = g_array_append_vals(decode->inputData,
1308                                                 inputData + packetDescriptions[i].mStartOffset,
1309                                                 packetDescriptions[i].mDataByteSize);
1310         AudioStreamPacketDescription* packetDesc = g_malloc(sizeof(AudioStreamPacketDescription));
1311         *packetDesc = packetDescriptions[i];
1312         g_queue_push_tail(decode->packetDesc, packetDesc);
1313     }
1314 }
1315 
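/*
 * Input callback for AudioConverterFillComplexBuffer(). It supplies up to
 * *numberDataPackets queued packets by pointing the buffer list at the cached
 * input bytes, and reports zero packets when the queue is empty so the
 * converter returns whatever output it has produced so far.
 */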
1316 OSStatus retrieveInputData(AudioConverterRef                audioConverter,
1317                            UInt32*                          numberDataPackets,
1318                            AudioBufferList*                 bufferList,
1319                            AudioStreamPacketDescription**   dataPacketDescription,
1320                            void*                            userData) {
1321     AudioConverter* decode = (AudioConverter*)userData;
1322 
1323     if(!g_queue_is_empty(decode->packetDesc)) {
1324         guint numPackets;
1325         if(*numberDataPackets <= g_queue_get_length(decode->packetDesc)) {
1326             numPackets = *numberDataPackets;
1327         } else {
1328             numPackets = g_queue_get_length(decode->packetDesc);
1329         }
1330 
1331         if (NULL != dataPacketDescription) {
1332             *dataPacketDescription = g_malloc(numPackets*sizeof(AudioStreamPacketDescription));
            if(NULL == *dataPacketDescription) {
1334                 return kAudioConverterErr_UnspecifiedError;
1335             }
1336             if(NULL != decode->previousDesc) {
1337                 g_free(decode->previousDesc);
1338             }
1339             decode->previousDesc = *dataPacketDescription;
1340         }
1341 
1342         int i;
1343         for(i = 0; i < numPackets; i++) {
1344             bufferList->mBuffers[i].mData = decode->inputData->data + decode->inputOffset;
1345             AudioStreamPacketDescription* packetDesc = g_queue_pop_head(decode->packetDesc);
1346             decode->inputOffset += packetDesc->mDataByteSize;
1347             bufferList->mBuffers[i].mDataByteSize = packetDesc->mDataByteSize;
1348             bufferList->mBuffers[i].mNumberChannels = decode->audioOutputFormat.mChannelsPerFrame;
1349 
1350             if (NULL != dataPacketDescription) {
                (*dataPacketDescription)[i].mStartOffset = 0;
                (*dataPacketDescription)[i].mVariableFramesInPacket = packetDesc->mVariableFramesInPacket;
                (*dataPacketDescription)[i].mDataByteSize = packetDesc->mDataByteSize;
1354             }
1355             g_free(packetDesc);
1356         }
1357         *numberDataPackets = numPackets;
1358     } else {
1359         *numberDataPackets = 0;
1360     }
1361 
1362     return 0;
1363 }
1364 
1365 // --------------------------------------------------------------------------
1366 gboolean audioconverter_plugin_init (GstPlugin * audioconverter)
1367 {
    /* Debug category for filtering log messages. */
    GST_DEBUG_CATEGORY_INIT (audioconverter_debug, "audioconverter",
                             0, "audioconverter element");
1374 
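    // Rank 512 is above GST_RANK_PRIMARY (256), so this element is preferred
    // over other primary-ranked audio decoders.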
1375     gboolean reg_result = gst_element_register (audioconverter, "audioconverter",
1376                                                 512, TYPE_AUDIOCONVERTER);
1377 
1378     return reg_result;
1379 }