/*
 * Copyright (c) 2002, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.  Oracle designates this
 * particular file as subject to the "Classpath" exception as provided
 * by Oracle in the LICENSE file that accompanied this code.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

//#define USE_ERROR
//#define USE_TRACE
//#define USE_VERBOSE_TRACE

#include <AudioUnit/AudioUnit.h>
#include <CoreServices/CoreServices.h>
#include <AudioToolbox/AudioConverter.h>
#include <pthread.h>
#include <math.h>
/*
#if !defined(__COREAUDIO_USE_FLAT_INCLUDES__)
#include <CoreAudio/CoreAudioTypes.h>
#else
#include <CoreAudioTypes.h>
#endif
*/

#include "PLATFORM_API_MacOSX_Utils.h"

extern "C" {
#include "Utilities.h"
#include "DirectAudio.h"
}

#if USE_DAUDIO == TRUE


#ifdef USE_TRACE
static void PrintStreamDesc(const AudioStreamBasicDescription *inDesc) {
    TRACE4("ID='%c%c%c%c'", (char)(inDesc->mFormatID >> 24), (char)(inDesc->mFormatID >> 16), (char)(inDesc->mFormatID >> 8), (char)(inDesc->mFormatID));
    TRACE2(", %f Hz, flags=0x%lX", (float)inDesc->mSampleRate, (long unsigned)inDesc->mFormatFlags);
    TRACE2(", %ld channels, %ld bits", (long)inDesc->mChannelsPerFrame, (long)inDesc->mBitsPerChannel);
    TRACE1(", %ld bytes per frame\n", (long)inDesc->mBytesPerFrame);
}
#else
static inline void PrintStreamDesc(const AudioStreamBasicDescription *inDesc) { }
#endif


#define MAX(x, y)   ((x) >= (y) ? (x) : (y))
#define MIN(x, y)   ((x) <= (y) ? (x) : (y))


// =======================================
// MixerProvider functions implementation

static DeviceList deviceCache;

INT32 DAUDIO_GetDirectAudioDeviceCount() {
    deviceCache.Refresh();
    int count = deviceCache.GetCount();
    if (count > 0) {
        // add "default" device
        count++;
        TRACE1("DAUDIO_GetDirectAudioDeviceCount: returns %d devices\n", count);
    } else {
        TRACE0("DAUDIO_GetDirectAudioDeviceCount: no devices found\n");
    }
    return count;
}

INT32 DAUDIO_GetDirectAudioDeviceDescription(INT32 mixerIndex, DirectAudioDeviceDescription *desc) {
    bool result = true;
    desc->deviceID = 0;
    if (mixerIndex == 0) {
        // default device
        strncpy(desc->name, "Default Audio Device", DAUDIO_STRING_LENGTH);
        strncpy(desc->description, "Default Audio Device", DAUDIO_STRING_LENGTH);
        desc->maxSimulLines = -1;
    } else {
        AudioDeviceID deviceID;
        result = deviceCache.GetDeviceInfo(mixerIndex-1, &deviceID, DAUDIO_STRING_LENGTH,
            desc->name, desc->vendor, desc->description, desc->version);
        if (result) {
            desc->deviceID = (INT32)deviceID;
            desc->maxSimulLines = -1;
        }
    }
    return result ? TRUE : FALSE;
}


void DAUDIO_GetFormats(INT32 mixerIndex, INT32 deviceID, int isSource, void* creator) {
    TRACE3(">>DAUDIO_GetFormats mixerIndex=%d deviceID=0x%x isSource=%d\n", (int)mixerIndex, (int)deviceID, isSource);

    AudioDeviceID audioDeviceID = deviceID == 0 ? GetDefaultDevice(isSource) : (AudioDeviceID)deviceID;

    if (audioDeviceID == 0) {
        return;
    }

    int totalChannels = GetChannelCount(audioDeviceID, isSource);

    if (totalChannels == 0) {
        TRACE0("<<DAUDIO_GetFormats, no streams!\n");
        return;
    }

    if (isSource && totalChannels < 2) {
        // report 2 channels even if only mono is supported
        totalChannels = 2;
    }

    int channels[] = {1, 2, totalChannels};
    int channelsCount = MIN(totalChannels, 3);

    float hardwareSampleRate = GetSampleRate(audioDeviceID, isSource);
    TRACE2("  DAUDIO_GetFormats: got %d channels, sampleRate == %f\n", totalChannels, hardwareSampleRate);

    // any sample rate is supported
    float sampleRate = -1;

    static int sampleBits[] = {8, 16, 24};
    static int sampleBitsCount = sizeof(sampleBits)/sizeof(sampleBits[0]);

    // the last audio format is the default one (used by DataLine.open() if no format is specified);
    // use 16-bit PCM stereo (mono if stereo is not supported) at the current hardware sample rate as the default
    int defBits = 16;
    int defChannels = MIN(2, channelsCount);
    float defSampleRate = hardwareSampleRate;
    // don't add the default format if the sample rate is not specified
    bool addDefault = defSampleRate > 0;

    // TODO: CoreAudio can handle signed/unsigned, little-endian/big-endian
    // TODO: register the formats (to prevent DirectAudio software conversion) - need to fix DirectAudioDevice.createDataLineInfo
    // to avoid software conversions if both signed/unsigned or big-/little-endian are supported
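    // Illustrative example (assumed device, for orientation only): for a stereo device at 44100 Hz
    // the loop below registers 8/16/24-bit PCM for 1 and 2 channels with sampleRate == -1 ("any rate");
    // 8-bit formats are unsigned, 16/24-bit formats are signed with platform endianness.
    // The block after the loop then appends the default format: 16-bit signed stereo at 44100 Hz.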
    for (int channelIndex = 0; channelIndex < channelsCount; channelIndex++) {
        for (int bitIndex = 0; bitIndex < sampleBitsCount; bitIndex++) {
            int bits = sampleBits[bitIndex];
            if (addDefault && bits == defBits && channels[channelIndex] == defChannels && sampleRate == defSampleRate) {
                // the format is the default one, don't add it now
                continue;
            }
            DAUDIO_AddAudioFormat(creator,
                bits,                       // sample size in bits
                -1,                         // frame size (auto)
                channels[channelIndex],     // channels
                sampleRate,                 // sample rate
                DAUDIO_PCM,                 // only accept PCM
                bits == 8 ? FALSE : TRUE,   // signed
                bits == 8 ? FALSE           // little-endian for 8bit
                    : UTIL_IsBigEndianPlatform());
        }
    }
    // add the default format
    if (addDefault) {
        DAUDIO_AddAudioFormat(creator,
            defBits,                        // 16 bits
            -1,                             // automatically calculate frame size
            defChannels,                    // channels
            defSampleRate,                  // sample rate
            DAUDIO_PCM,                     // PCM
            TRUE,                           // signed
            UTIL_IsBigEndianPlatform());    // native endianness
    }

    TRACE0("<<DAUDIO_GetFormats\n");
}


// =======================================
// Source/Target DataLine functions implementation
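//
// Rough call sequence, as driven by the Java-side DirectAudio code (a sketch, not a specification):
//   void *handle = DAUDIO_Open(...);       // create the AUHAL unit, ring buffer (and resampler for capture)
//   DAUDIO_Start(handle, isSource);        // AudioOutputUnitStart
//   DAUDIO_Write(...) / DAUDIO_Read(...);  // exchange data with the ring buffer
//   DAUDIO_Stop(handle, isSource);         // AudioOutputUnitStop
//   DAUDIO_Close(handle, isSource);        // dispose the device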

// ====
/* 1writer-1reader ring buffer class with flush() support */
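/*
 * Usage within this file (a sketch; see DAUDIO_Open and the callbacks for the real call sites):
 *
 *   RingBuffer rb;
 *   rb.Allocate(bufferSizeInBytes, extraBytes);  // extraBytes > 0 only for capture lines
 *   rb.Write(src, len, preventOverflow);         // producer: DAUDIO_Write (playback, true) or InputCallback (capture, false)
 *   rb.Read(dst, len);                           // consumer: OutputCallback (playback) or DAUDIO_Read (capture)
 *   rb.Flush();                                  // discard everything that has not been read yet
 */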
class RingBuffer {
public:
    RingBuffer() : pBuffer(NULL), nBufferSize(0) {
        pthread_mutex_init(&lockMutex, NULL);
    }
    ~RingBuffer() {
        Deallocate();
        pthread_mutex_destroy(&lockMutex);
    }

    // extraBytes: number of additionally allocated bytes to prevent data
    // overlapping when the buffer is almost completely filled
    // (required only if Write() is allowed to overwrite not-yet-read data)
    bool Allocate(int requestedBufferSize, int extraBytes) {
        int fullBufferSize = requestedBufferSize + extraBytes;
        int powerOfTwo = 1;
        while (powerOfTwo < fullBufferSize) {
            powerOfTwo <<= 1;
        }
        pBuffer = (Byte*)malloc(powerOfTwo);
        if (pBuffer == NULL) {
            ERROR0("RingBuffer::Allocate: OUT OF MEMORY\n");
            return false;
        }

        nBufferSize = requestedBufferSize;
        nAllocatedBytes = powerOfTwo;
        nPosMask = powerOfTwo - 1;
        nWritePos = 0;
        nReadPos = 0;
        nFlushPos = -1;

        TRACE2("RingBuffer::Allocate: OK, bufferSize=%d, allocated:%d\n", nBufferSize, nAllocatedBytes);
        return true;
    }

    void Deallocate() {
        if (pBuffer) {
            free(pBuffer);
            pBuffer = NULL;
            nBufferSize = 0;
        }
    }

    inline int GetBufferSize() {
        return nBufferSize;
    }

    inline int GetAllocatedSize() {
        return nAllocatedBytes;
    }

    // gets number of bytes available for reading
    int GetValidByteCount() {
        lock();
        INT64 result = nWritePos - (nFlushPos >= 0 ? nFlushPos : nReadPos);
        unlock();
        return result > (INT64)nBufferSize ? nBufferSize : (int)result;
    }
    int Write(void *srcBuffer, int len, bool preventOverflow) {
        lock();
        TRACE2("RingBuffer::Write (%d bytes, preventOverflow=%d)\n", len, preventOverflow ? 1 : 0);
        TRACE2("  writePos = %lld (%d)", (long long)nWritePos, Pos2Offset(nWritePos));
        TRACE2("  readPos=%lld (%d)", (long long)nReadPos, Pos2Offset(nReadPos));
        TRACE2("  flushPos=%lld (%d)\n", (long long)nFlushPos, Pos2Offset(nFlushPos));

        INT64 writePos = nWritePos;
        if (preventOverflow) {
            INT64 avail_read = writePos - (nFlushPos >= 0 ? nFlushPos : nReadPos);
            if (avail_read >= (INT64)nBufferSize) {
                // no space
                TRACE0("  preventOverflow: OVERFLOW => len = 0;\n");
                len = 0;
            } else {
                int avail_write = nBufferSize - (int)avail_read;
                if (len > avail_write) {
                    TRACE2("  preventOverflow: decrease len: %d => %d\n", len, avail_write);
                    len = avail_write;
                }
            }
        }
        unlock();

        if (len > 0) {

            write((Byte *)srcBuffer, Pos2Offset(writePos), len);

            lock();
            TRACE4("--RingBuffer::Write writePos: %lld (%d) => %lld, (%d)\n",
                (long long)nWritePos, Pos2Offset(nWritePos), (long long)nWritePos + len, Pos2Offset(nWritePos + len));
            nWritePos += len;
            unlock();
        }
        return len;
    }

    int Read(void *dstBuffer, int len) {
        lock();
        TRACE1("RingBuffer::Read (%d bytes)\n", len);
        TRACE2("  writePos = %lld (%d)", (long long)nWritePos, Pos2Offset(nWritePos));
        TRACE2("  readPos=%lld (%d)", (long long)nReadPos, Pos2Offset(nReadPos));
        TRACE2("  flushPos=%lld (%d)\n", (long long)nFlushPos, Pos2Offset(nFlushPos));

        applyFlush();
        INT64 avail_read = nWritePos - nReadPos;
        // check for overflow
        if (avail_read > (INT64)nBufferSize) {
            nReadPos = nWritePos - nBufferSize;
            avail_read = nBufferSize;
            TRACE0("  OVERFLOW\n");
        }
        INT64 readPos = nReadPos;
        unlock();

        if (len > (int)avail_read) {
            TRACE2("  RingBuffer::Read - don't have enough data, len: %d => %d\n", len, (int)avail_read);
            len = (int)avail_read;
        }

        if (len > 0) {

            read((Byte *)dstBuffer, Pos2Offset(readPos), len);

            lock();
            if (applyFlush()) {
                // just got flush(), the results became obsolete
                TRACE0("--RingBuffer::Read, got Flush, return 0\n");
                len = 0;
            } else {
                TRACE4("--RingBuffer::Read readPos: %lld (%d) => %lld (%d)\n",
                    (long long)nReadPos, Pos2Offset(nReadPos), (long long)nReadPos + len, Pos2Offset(nReadPos + len));
                nReadPos += len;
            }
            unlock();
        } else {
            // underrun!
        }
        return len;
    }

    // returns the number of flushed bytes
    int Flush() {
        lock();
        INT64 flushedBytes = nWritePos - (nFlushPos >= 0 ? nFlushPos : nReadPos);
        nFlushPos = nWritePos;
        unlock();
        return flushedBytes > (INT64)nBufferSize ? nBufferSize : (int)flushedBytes;
    }

private:
    Byte *pBuffer;
    int nBufferSize;
    int nAllocatedBytes;
    INT64 nPosMask;

    pthread_mutex_t lockMutex;

    volatile INT64 nWritePos;
    volatile INT64 nReadPos;
    // Flush() sets nFlushPos to nWritePos;
    // the next Read() sets nReadPos to nFlushPos and resets nFlushPos to -1
    volatile INT64 nFlushPos;

    inline void lock() {
        pthread_mutex_lock(&lockMutex);
    }
    inline void unlock() {
        pthread_mutex_unlock(&lockMutex);
    }

    inline bool applyFlush() {
        if (nFlushPos >= 0) {
            nReadPos = nFlushPos;
            nFlushPos = -1;
            return true;
        }
        return false;
    }

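    // maps the monotonically increasing 64-bit position onto an offset inside the
    // power-of-two sized allocation (the mask is equivalent to pos % nAllocatedBytes)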
    inline int Pos2Offset(INT64 pos) {
        return (int)(pos & nPosMask);
    }

    void write(Byte *srcBuffer, int dstOffset, int len) {
        int dstEndOffset = dstOffset + len;

        int lenAfterWrap = dstEndOffset - nAllocatedBytes;
        if (lenAfterWrap > 0) {
            // dest.buffer does wrap
            len = nAllocatedBytes - dstOffset;
            memcpy(pBuffer+dstOffset, srcBuffer, len);
            memcpy(pBuffer, srcBuffer+len, lenAfterWrap);
        } else {
            // dest.buffer does not wrap
            memcpy(pBuffer+dstOffset, srcBuffer, len);
        }
    }

    void read(Byte *dstBuffer, int srcOffset, int len) {
        int srcEndOffset = srcOffset + len;

        int lenAfterWrap = srcEndOffset - nAllocatedBytes;
        if (lenAfterWrap > 0) {
            // need to unwrap data
            len = nAllocatedBytes - srcOffset;
            memcpy(dstBuffer, pBuffer+srcOffset, len);
            memcpy(dstBuffer+len, pBuffer, lenAfterWrap);
        } else {
            // source buffer is not wrapped
            memcpy(dstBuffer, pBuffer+srcOffset, len);
        }
    }
};


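/*
 * Resampler wraps a CoreAudio AudioConverter. It is used for target (capture) lines only:
 * DAUDIO_Open creates it when the requested sample rate differs from the hardware sample rate,
 * and InputCallback feeds it the captured data so the converted data ends up in the ring buffer
 * in the user-requested format.
 */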
class Resampler {
private:
    enum {
        kResamplerEndOfInputData = 1 // error code used to interrupt the conversion (end of input data)
    };
public:
    Resampler() : converter(NULL), outBuffer(NULL) { }
    ~Resampler() {
        if (converter != NULL) {
            AudioConverterDispose(converter);
        }
        if (outBuffer != NULL) {
            free(outBuffer);
        }
    }

    // inFormat & outFormat must be interleaved!
    bool Init(const AudioStreamBasicDescription *inFormat, const AudioStreamBasicDescription *outFormat,
            int inputBufferSizeInBytes)
    {
        TRACE0(">>Resampler::Init\n");
        TRACE0("  inFormat: ");
        PrintStreamDesc(inFormat);
        TRACE0("  outFormat: ");
        PrintStreamDesc(outFormat);
        TRACE1("  inputBufferSize: %d bytes\n", inputBufferSizeInBytes);
        OSStatus err;

        if ((outFormat->mFormatFlags & kAudioFormatFlagIsNonInterleaved) != 0 && outFormat->mChannelsPerFrame != 1) {
            ERROR0("Resampler::Init ERROR: outFormat is non-interleaved\n");
            return false;
        }
        if ((inFormat->mFormatFlags & kAudioFormatFlagIsNonInterleaved) != 0 && inFormat->mChannelsPerFrame != 1) {
            ERROR0("Resampler::Init ERROR: inFormat is non-interleaved\n");
            return false;
        }

        memcpy(&asbdIn, inFormat, sizeof(AudioStreamBasicDescription));
        memcpy(&asbdOut, outFormat, sizeof(AudioStreamBasicDescription));

        err = AudioConverterNew(inFormat, outFormat, &converter);

        if (err || converter == NULL) {
            OS_ERROR1(err, "Resampler::Init (AudioConverterNew), converter=%p", converter);
            return false;
        }

        // allocate a buffer for output data
        int maximumInFrames = inputBufferSizeInBytes / inFormat->mBytesPerFrame;
        // take into account trailingFrames
        AudioConverterPrimeInfo primeInfo = {0, 0};
        UInt32 sizePrime = sizeof(primeInfo);
        err = AudioConverterGetProperty(converter, kAudioConverterPrimeInfo, &sizePrime, &primeInfo);
        if (err) {
            OS_ERROR0(err, "Resampler::Init (get kAudioConverterPrimeInfo)");
            // ignore the error
        } else {
            // the default primeMethod is kConverterPrimeMethod_Normal, so we need only trailingFrames
            maximumInFrames += primeInfo.trailingFrames;
        }
        float outBufferSizeInFrames = (outFormat->mSampleRate / inFormat->mSampleRate) * ((float)maximumInFrames);
        // to avoid complex calculations just double the calculated value
        outBufferSize = (int)outBufferSizeInFrames * outFormat->mBytesPerFrame * 2;
        // safety check - ensure the output buffer covers at least 256 frames
        int minOutSize = 256 * outFormat->mBytesPerFrame;
        if (outBufferSize < minOutSize) {
            outBufferSize = minOutSize;
        }

        outBuffer = malloc(outBufferSize);

        if (outBuffer == NULL) {
            ERROR1("Resampler::Init ERROR: malloc failed (%d bytes)\n", outBufferSize);
            AudioConverterDispose(converter);
            converter = NULL;
            return false;
        }

        TRACE1("  allocated: %d bytes for output buffer\n", outBufferSize);

        TRACE0("<<Resampler::Init: OK\n");
        return true;
    }

    // returns size of the internal output buffer
    int GetOutBufferSize() {
        return outBufferSize;
    }

    // processes the next chunk of data (writes resampled data to the ringBuffer without overflow check)
    int Process(void *srcBuffer, int len, RingBuffer *ringBuffer) {
        int bytesWritten = 0;
        TRACE2(">>Resampler::Process: %d bytes, converter = %p\n", len, converter);
        if (converter == NULL) {    // sanity check
            bytesWritten = ringBuffer->Write(srcBuffer, len, false);
        } else {
            InputProcData data;
            data.pThis = this;
            data.data = (Byte *)srcBuffer;
            data.dataSize = len;

            OSStatus err;
            do {
                AudioBufferList abl;    // by default it contains 1 AudioBuffer
                abl.mNumberBuffers = 1;
                abl.mBuffers[0].mNumberChannels = asbdOut.mChannelsPerFrame;
                abl.mBuffers[0].mDataByteSize   = outBufferSize;
                abl.mBuffers[0].mData           = outBuffer;

                UInt32 packets = (UInt32)outBufferSize / asbdOut.mBytesPerPacket;

                TRACE2(">>AudioConverterFillComplexBuffer: request %d packets, provide %d bytes buffer\n",
                    (int)packets, (int)abl.mBuffers[0].mDataByteSize);

                err = AudioConverterFillComplexBuffer(converter, ConverterInputProc, &data, &packets, &abl, NULL);

                TRACE2("<<AudioConverterFillComplexBuffer: got %d packets (%d bytes)\n",
                    (int)packets, (int)abl.mBuffers[0].mDataByteSize);
                if (packets > 0) {
                    int bytesToWrite = (int)(packets * asbdOut.mBytesPerPacket);
                    bytesWritten += ringBuffer->Write(abl.mBuffers[0].mData, bytesToWrite, false);
                }

                // if the output buffer is too small to store all available frames,
                // we get noErr here; in that case just continue the conversion
            } while (err == noErr);

            if (err != kResamplerEndOfInputData) {
                // unexpected error
                OS_ERROR0(err, "Resampler::Process (AudioConverterFillComplexBuffer)");
            }
        }
        TRACE2("<<Resampler::Process: written %d bytes (converted from %d bytes)\n", bytesWritten, len);

        return bytesWritten;
    }

    // resets internal buffers
    void Discontinue() {
        TRACE0(">>Resampler::Discontinue\n");
        if (converter != NULL) {
            AudioConverterReset(converter);
        }
        TRACE0("<<Resampler::Discontinue\n");
    }

private:
    AudioConverterRef converter;

    // buffer for output data
    // note that there is no problem if the buffer is not big enough to store
    // all converted data - it's only a performance issue
    void *outBuffer;
    int outBufferSize;

    AudioStreamBasicDescription asbdIn;
    AudioStreamBasicDescription asbdOut;

    struct InputProcData {
        Resampler *pThis;
        Byte *data;     // input data for the converter
        int dataSize;   // == 0 if all data has already been provided to the converter
    };

    static OSStatus ConverterInputProc(AudioConverterRef inAudioConverter, UInt32 *ioNumberDataPackets,
            AudioBufferList *ioData, AudioStreamPacketDescription **outDataPacketDescription, void *inUserData)
    {
        InputProcData *data = (InputProcData *)inUserData;

        TRACE3("  >>ConverterInputProc: requested %d packets, data contains %d bytes (%d packets)\n",
            (int)*ioNumberDataPackets, (int)data->dataSize, (int)(data->dataSize / data->pThis->asbdIn.mBytesPerPacket));
        if (data->dataSize == 0) {
            // already called & provided all input data
            // interrupt the conversion by returning an error
            *ioNumberDataPackets = 0;
            TRACE0("  <<ConverterInputProc: returns kResamplerEndOfInputData\n");
            return kResamplerEndOfInputData;
        }

        ioData->mNumberBuffers = 1;
        ioData->mBuffers[0].mNumberChannels = data->pThis->asbdIn.mChannelsPerFrame;
        ioData->mBuffers[0].mDataByteSize   = data->dataSize;
        ioData->mBuffers[0].mData           = data->data;

        *ioNumberDataPackets = data->dataSize / data->pThis->asbdIn.mBytesPerPacket;

        // all data has been provided to the converter
        data->dataSize = 0;

        TRACE1("  <<ConverterInputProc: returns %d packets\n", (int)(*ioNumberDataPackets));
        return noErr;
    }

};
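
// Typical use within this file (a sketch; see DAUDIO_Open and InputCallback for the real call sites):
//   Resampler *r = new Resampler();
//   r->Init(&halFormat, &userFormat, halBufferSizeInBytes);  // halFormat/userFormat are illustrative names
//   r->Process(capturedData, capturedBytes, &ringBuffer);    // called from InputCallback
//   r->Discontinue();                                        // on a capture timestamp discontinuity
//   delete r;                                                // done by ~OSX_DirectAudioDevice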


struct OSX_DirectAudioDevice {
    AudioUnit   audioUnit;
    RingBuffer  ringBuffer;
    AudioStreamBasicDescription asbd;

    // only for target lines
    UInt32      inputBufferSizeInBytes;
    Resampler   *resampler;
    // to detect discontinuity (to reset resampler)
    SInt64      lastWrittenSampleTime;


    OSX_DirectAudioDevice() : audioUnit(NULL), asbd(), resampler(NULL), lastWrittenSampleTime(0) {
    }

    ~OSX_DirectAudioDevice() {
        if (audioUnit) {
            CloseComponent(audioUnit);
        }
        if (resampler) {
            delete resampler;
        }
    }
};

static AudioUnit CreateOutputUnit(AudioDeviceID deviceID, int isSource)
{
    OSStatus err;
    AudioUnit unit;
    UInt32 size;

    ComponentDescription desc;
    desc.componentType         = kAudioUnitType_Output;
    desc.componentSubType      = (deviceID == 0 && isSource) ? kAudioUnitSubType_DefaultOutput : kAudioUnitSubType_HALOutput;
    desc.componentManufacturer = kAudioUnitManufacturer_Apple;
    desc.componentFlags        = 0;
    desc.componentFlagsMask    = 0;

    Component comp = FindNextComponent(NULL, &desc);
    err = OpenAComponent(comp, &unit);

    if (err) {
        OS_ERROR0(err, "CreateOutputUnit:OpenAComponent");
        return NULL;
    }

    if (!isSource) {
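        // AUHAL bus convention: element (bus) 0 is output, element (bus) 1 is input.
        // For a capture line, disable IO on the output element and enable IO on the input element
        // before binding the unit to a concrete device.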
        int enableIO = 0;
        err = AudioUnitSetProperty(unit, kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Output,
                                    0, &enableIO, sizeof(enableIO));
        if (err) {
            OS_ERROR0(err, "SetProperty (output EnableIO)");
        }
        enableIO = 1;
        err = AudioUnitSetProperty(unit, kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Input,
                                    1, &enableIO, sizeof(enableIO));
        if (err) {
            OS_ERROR0(err, "SetProperty (input EnableIO)");
        }

        if (!deviceID) {
            // get the real AudioDeviceID for the default input device (the current macOS input device)
            deviceID = GetDefaultDevice(isSource);
            if (!deviceID) {
                CloseComponent(unit);
                return NULL;
            }
        }
    }

    if (deviceID) {
        err = AudioUnitSetProperty(unit, kAudioOutputUnitProperty_CurrentDevice, kAudioUnitScope_Global,
                                    0, &deviceID, sizeof(deviceID));
        if (err) {
            OS_ERROR0(err, "SetProperty (CurrentDevice)");
            CloseComponent(unit);
            return NULL;
        }
    }

    return unit;
}

static OSStatus OutputCallback(void                         *inRefCon,
                               AudioUnitRenderActionFlags   *ioActionFlags,
                               const AudioTimeStamp         *inTimeStamp,
                               UInt32                       inBusNumber,
                               UInt32                       inNumberFrames,
                               AudioBufferList              *ioData)
{
    OSX_DirectAudioDevice *device = (OSX_DirectAudioDevice*)inRefCon;

    int nchannels = ioData->mNumberBuffers; // should always be == 1 (interleaved channels)
    AudioBuffer *audioBuffer = ioData->mBuffers;

    TRACE3(">>OutputCallback: busNum=%d, requested %d frames (%d bytes)\n",
        (int)inBusNumber, (int)inNumberFrames, (int)(inNumberFrames * device->asbd.mBytesPerFrame));
    TRACE3("  abl: %d buffers, buffer[0].channels=%d, buffer.size=%d\n",
        nchannels, (int)audioBuffer->mNumberChannels, (int)audioBuffer->mDataByteSize);

    int bytesToRead = inNumberFrames * device->asbd.mBytesPerFrame;
    if (bytesToRead > (int)audioBuffer->mDataByteSize) {
        TRACE0("--OutputCallback: !!! audioBuffer IS TOO SMALL!!!\n");
        bytesToRead = audioBuffer->mDataByteSize / device->asbd.mBytesPerFrame * device->asbd.mBytesPerFrame;
    }
    int bytesRead = device->ringBuffer.Read(audioBuffer->mData, bytesToRead);
    if (bytesRead < bytesToRead) {
        // not enough data (underrun)
        TRACE2("--OutputCallback: !!! UNDERRUN (read %d bytes of %d)!!!\n", bytesRead, bytesToRead);
        // silence the rest
        memset((Byte*)audioBuffer->mData + bytesRead, 0, bytesToRead-bytesRead);
        bytesRead = bytesToRead;
    }

    audioBuffer->mDataByteSize = (UInt32)bytesRead;
    // SAFETY: set mDataByteSize to zero for all other AudioBuffers in the AudioBufferList
    while (--nchannels > 0) {
        audioBuffer++;
        audioBuffer->mDataByteSize = 0;
    }
    TRACE1("<<OutputCallback (returns %d)\n", bytesRead);

    return noErr;
}

static OSStatus InputCallback(void                          *inRefCon,
                              AudioUnitRenderActionFlags    *ioActionFlags,
                              const AudioTimeStamp          *inTimeStamp,
                              UInt32                        inBusNumber,
                              UInt32                        inNumberFrames,
                              AudioBufferList               *ioData)
{
    OSX_DirectAudioDevice *device = (OSX_DirectAudioDevice*)inRefCon;

    TRACE4(">>InputCallback: busNum=%d, timeStamp=%lld, %d frames (%d bytes)\n",
        (int)inBusNumber, (long long)inTimeStamp->mSampleTime, (int)inNumberFrames, (int)(inNumberFrames * device->asbd.mBytesPerFrame));

    AudioBufferList abl;    // by default it contains 1 AudioBuffer
    abl.mNumberBuffers = 1;
    abl.mBuffers[0].mNumberChannels = device->asbd.mChannelsPerFrame;
    abl.mBuffers[0].mDataByteSize   = device->inputBufferSizeInBytes;   // assume this is == (inNumberFrames * device->asbd.mBytesPerFrame)
    abl.mBuffers[0].mData           = NULL;     // request the audioUnit's own buffer

    OSStatus err = AudioUnitRender(device->audioUnit, ioActionFlags, inTimeStamp, inBusNumber, inNumberFrames, &abl);
    if (err) {
        OS_ERROR0(err, "<<InputCallback: AudioUnitRender");
    } else {
        if (device->resampler != NULL) {
            // test for discontinuity
            // AUHAL starts timestamps at zero, so test whether the current timestamp is less than the last written one
            SInt64 sampleTime = inTimeStamp->mSampleTime;
            if (sampleTime < device->lastWrittenSampleTime) {
                // discontinuity, reset the resampler
                TRACE2("  InputCallback (RESAMPLED), DISCONTINUITY (%f -> %f)\n",
                    (float)device->lastWrittenSampleTime, (float)sampleTime);

                device->resampler->Discontinue();
            } else {
                TRACE2("  InputCallback (RESAMPLED), continuous: lastWrittenSampleTime = %f, sampleTime=%f\n",
                    (float)device->lastWrittenSampleTime, (float)sampleTime);
            }
            device->lastWrittenSampleTime = sampleTime + inNumberFrames;

            int bytesWritten = device->resampler->Process(abl.mBuffers[0].mData, (int)abl.mBuffers[0].mDataByteSize, &device->ringBuffer);
            TRACE2("<<InputCallback (RESAMPLED, saved %d bytes of %d)\n", bytesWritten, (int)abl.mBuffers[0].mDataByteSize);
        } else {
            int bytesWritten = device->ringBuffer.Write(abl.mBuffers[0].mData, (int)abl.mBuffers[0].mDataByteSize, false);
            TRACE2("<<InputCallback (saved %d bytes of %d)\n", bytesWritten, (int)abl.mBuffers[0].mDataByteSize);
        }
    }

    return noErr;
}


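// Fills in an AudioStreamBasicDescription for packed linear PCM with all channels interleaved
// in a single buffer (the kAudioFormatFlagIsNonInterleaved flag is not set).
// Worked example (illustrative only): 16-bit stereo => mBytesPerPacket = 2 * ((16 + 7) / 8) = 4,
// mFramesPerPacket = 1, mBytesPerFrame = 4, mBitsPerChannel = 16.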
static void FillASBDForNonInterleavedPCM(AudioStreamBasicDescription& asbd,
    float sampleRate, int channels, int sampleSizeInBits, bool isFloat, int isSigned, bool isBigEndian)
{
    // FillOutASBDForLPCM cannot produce unsigned integer format
    asbd.mSampleRate = sampleRate;
    asbd.mFormatID = kAudioFormatLinearPCM;
    asbd.mFormatFlags = (isFloat ? kAudioFormatFlagIsFloat : (isSigned ? kAudioFormatFlagIsSignedInteger : 0))
        | (isBigEndian ? (kAudioFormatFlagIsBigEndian) : 0)
        | kAudioFormatFlagIsPacked;
    asbd.mBytesPerPacket = channels * ((sampleSizeInBits + 7) / 8);
    asbd.mFramesPerPacket = 1;
    asbd.mBytesPerFrame = asbd.mBytesPerPacket;
    asbd.mChannelsPerFrame = channels;
    asbd.mBitsPerChannel = sampleSizeInBits;
}

void* DAUDIO_Open(INT32 mixerIndex, INT32 deviceID, int isSource,
                  int encoding, float sampleRate, int sampleSizeInBits,
                  int frameSize, int channels,
                  int isSigned, int isBigEndian, int bufferSizeInBytes)
{
    TRACE3(">>DAUDIO_Open: mixerIndex=%d deviceID=0x%x isSource=%d\n", (int)mixerIndex, (unsigned int)deviceID, isSource);
    TRACE3("  sampleRate=%d sampleSizeInBits=%d channels=%d\n", (int)sampleRate, sampleSizeInBits, channels);
#ifdef USE_TRACE
    {
        AudioDeviceID audioDeviceID = deviceID;
        if (audioDeviceID == 0) {
            // default device
            audioDeviceID = GetDefaultDevice(isSource);
        }
        char name[256];
        OSStatus err = GetAudioObjectProperty(audioDeviceID, kAudioUnitScope_Global, kAudioDevicePropertyDeviceName, 256, &name, 0);
        if (err != noErr) {
            OS_ERROR1(err, "  audioDeviceID=0x%x, name is N/A:", (int)audioDeviceID);
        } else {
            TRACE2("  audioDeviceID=0x%x, name=%s\n", (int)audioDeviceID, name);
        }
    }
#endif

    if (encoding != DAUDIO_PCM) {
        ERROR1("<<DAUDIO_Open: ERROR: unsupported encoding (%d)\n", encoding);
        return NULL;
    }

    OSX_DirectAudioDevice *device = new OSX_DirectAudioDevice();

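    // The stream format is set on the side of the AUHAL unit that faces this code:
    // for playback (source lines) that is the input scope of the output element (bus 0),
    // for capture (target lines) the output scope of the input element (bus 1).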
    AudioUnitScope scope = isSource ? kAudioUnitScope_Input : kAudioUnitScope_Output;
    int element = isSource ? 0 : 1;
    OSStatus err = noErr;
    int extraBufferBytes = 0;

    device->audioUnit = CreateOutputUnit(deviceID, isSource);

    if (!device->audioUnit) {
        delete device;
        return NULL;
    }

    if (!isSource) {
        AudioDeviceID actualDeviceID = deviceID != 0 ? deviceID : GetDefaultDevice(isSource);
        float hardwareSampleRate = GetSampleRate(actualDeviceID, isSource);
        TRACE2("--DAUDIO_Open: sampleRate = %f, hardwareSampleRate=%f\n", sampleRate, hardwareSampleRate);

        if (fabs(sampleRate - hardwareSampleRate) > 1) {
            device->resampler = new Resampler();

            // request the HAL for Float32 with native endianness
            FillASBDForNonInterleavedPCM(device->asbd, hardwareSampleRate, channels, 32, true, false, kAudioFormatFlagsNativeEndian != 0);
        } else {
            sampleRate = hardwareSampleRate;    // in case the sample rates are not exactly equal
        }
    }

    if (device->resampler == NULL) {
        // no resampling, request the HAL for the requested format
        FillASBDForNonInterleavedPCM(device->asbd, sampleRate, channels, sampleSizeInBits, false, isSigned, isBigEndian);
    }

    err = AudioUnitSetProperty(device->audioUnit, kAudioUnitProperty_StreamFormat, scope, element, &device->asbd, sizeof(device->asbd));
    if (err) {
        OS_ERROR0(err, "<<DAUDIO_Open set StreamFormat");
        delete device;
        return NULL;
    }

    AURenderCallbackStruct output;
    output.inputProc       = isSource ? OutputCallback : InputCallback;
    output.inputProcRefCon = device;

    err = AudioUnitSetProperty(device->audioUnit,
                                isSource
                                    ? (AudioUnitPropertyID)kAudioUnitProperty_SetRenderCallback
                                    : (AudioUnitPropertyID)kAudioOutputUnitProperty_SetInputCallback,
                                kAudioUnitScope_Global, 0, &output, sizeof(output));
    if (err) {
        OS_ERROR0(err, "<<DAUDIO_Open set RenderCallback");
        delete device;
        return NULL;
    }

    err = AudioUnitInitialize(device->audioUnit);
    if (err) {
        OS_ERROR0(err, "<<DAUDIO_Open UnitInitialize");
        delete device;
        return NULL;
    }

    if (!isSource) {
        // for target lines we need extra bytes in the ringBuffer
        // to prevent collisions when InputCallback overwrites data on overflow
        UInt32 size;
        OSStatus err;

        size = sizeof(device->inputBufferSizeInBytes);
        err  = AudioUnitGetProperty(device->audioUnit, kAudioDevicePropertyBufferFrameSize, kAudioUnitScope_Global,
                                    0, &device->inputBufferSizeInBytes, &size);
        if (err) {
            OS_ERROR0(err, "<<DAUDIO_Open (TargetDataLine)GetBufferSize\n");
            delete device;
            return NULL;
        }
        device->inputBufferSizeInBytes *= device->asbd.mBytesPerFrame;  // convert frames to bytes
        extraBufferBytes = (int)device->inputBufferSizeInBytes;
    }

    if (device->resampler != NULL) {
        // the resampler output format is the user-requested format (== ringBuffer format)
        AudioStreamBasicDescription asbdOut; // ringBuffer format
        FillASBDForNonInterleavedPCM(asbdOut, sampleRate, channels, sampleSizeInBits, false, isSigned, isBigEndian);

        // set the resampler input buffer size to the HAL buffer size
        if (!device->resampler->Init(&device->asbd, &asbdOut, (int)device->inputBufferSizeInBytes)) {
            ERROR0("<<DAUDIO_Open: resampler.Init() FAILED.\n");
            delete device;
            return NULL;
        }
        // the extra bytes in the ringBuffer (extraBufferBytes) should equal the resampler output buffer size
        extraBufferBytes = device->resampler->GetOutBufferSize();
    }

    if (!device->ringBuffer.Allocate(bufferSizeInBytes, extraBufferBytes)) {
        ERROR0("<<DAUDIO_Open: Ring buffer allocation error\n");
        delete device;
        return NULL;
    }

    TRACE0("<<DAUDIO_Open: OK\n");
    return device;
}

int DAUDIO_Start(void* id, int isSource) {
    OSX_DirectAudioDevice *device = (OSX_DirectAudioDevice*)id;
    TRACE0("DAUDIO_Start\n");

    OSStatus err = AudioOutputUnitStart(device->audioUnit);

    if (err != noErr) {
        OS_ERROR0(err, "DAUDIO_Start");
    }

    return err == noErr ? TRUE : FALSE;
}

int DAUDIO_Stop(void* id, int isSource) {
    OSX_DirectAudioDevice *device = (OSX_DirectAudioDevice*)id;
    TRACE0("DAUDIO_Stop\n");

    OSStatus err = AudioOutputUnitStop(device->audioUnit);

    return err == noErr ? TRUE : FALSE;
}

void DAUDIO_Close(void* id, int isSource) {
    OSX_DirectAudioDevice *device = (OSX_DirectAudioDevice*)id;
    TRACE0("DAUDIO_Close\n");

    delete device;
}

int DAUDIO_Write(void* id, char* data, int byteSize) {
    OSX_DirectAudioDevice *device = (OSX_DirectAudioDevice*)id;
    TRACE1(">>DAUDIO_Write: %d bytes to write\n", byteSize);

    int result = device->ringBuffer.Write(data, byteSize, true);

    TRACE1("<<DAUDIO_Write: %d bytes written\n", result);
    return result;
}

int DAUDIO_Read(void* id, char* data, int byteSize) {
    OSX_DirectAudioDevice *device = (OSX_DirectAudioDevice*)id;
    TRACE1(">>DAUDIO_Read: %d bytes to read\n", byteSize);

    int result = device->ringBuffer.Read(data, byteSize);

    TRACE1("<<DAUDIO_Read: %d bytes have been read\n", result);
    return result;
}

int DAUDIO_GetBufferSize(void* id, int isSource) {
    OSX_DirectAudioDevice *device = (OSX_DirectAudioDevice*)id;

    int bufferSizeInBytes = device->ringBuffer.GetBufferSize();

    TRACE1("DAUDIO_GetBufferSize returns %d\n", bufferSizeInBytes);
    return bufferSizeInBytes;
}

int DAUDIO_StillDraining(void* id, int isSource) {
    OSX_DirectAudioDevice *device = (OSX_DirectAudioDevice*)id;

    int draining = device->ringBuffer.GetValidByteCount() > 0 ? TRUE : FALSE;

    TRACE1("DAUDIO_StillDraining returns %d\n", draining);
    return draining;
}

int DAUDIO_Flush(void* id, int isSource) {
    OSX_DirectAudioDevice *device = (OSX_DirectAudioDevice*)id;
    TRACE0("DAUDIO_Flush\n");

    device->ringBuffer.Flush();

    return TRUE;
}

int DAUDIO_GetAvailable(void* id, int isSource) {
    OSX_DirectAudioDevice *device = (OSX_DirectAudioDevice*)id;

    int bytesInBuffer = device->ringBuffer.GetValidByteCount();
    if (isSource) {
        return device->ringBuffer.GetBufferSize() - bytesInBuffer;
    } else {
        return bytesInBuffer;
    }
}

INT64 DAUDIO_GetBytePosition(void* id, int isSource, INT64 javaBytePos) {
    OSX_DirectAudioDevice *device = (OSX_DirectAudioDevice*)id;
    INT64 position;

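    // For playback, bytes still sitting in the ring buffer have not been rendered yet, so they
    // are subtracted from the Java-side position; for capture, bytes already captured but not
    // yet read by the Java side are added.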
    if (isSource) {
        position = javaBytePos - device->ringBuffer.GetValidByteCount();
    } else {
        position = javaBytePos + device->ringBuffer.GetValidByteCount();
    }

    TRACE2("DAUDIO_GetBytePosition returns %lld (javaBytePos = %lld)\n", (long long)position, (long long)javaBytePos);
    return position;
}

void DAUDIO_SetBytePosition(void* id, int isSource, INT64 javaBytePos) {
    // javaBytePos is not needed here (it is available in DAUDIO_GetBytePosition)
}

int DAUDIO_RequiresServicing(void* id, int isSource) {
    return FALSE;
}

void DAUDIO_Service(void* id, int isSource) {
    // unreachable because DAUDIO_RequiresServicing returns FALSE
}

#endif  // USE_DAUDIO == TRUE