Signed-off-by: Huw Davies <huw@codeweavers.com>
---
dlls/winecoreaudio.drv/coreaudio.c | 464 +++++++++++++++++++++++++++++
dlls/winecoreaudio.drv/mmdevdrv.c | 304 +++----------------
dlls/winecoreaudio.drv/unixlib.h | 20 ++
3 files changed, 527 insertions(+), 261 deletions(-)
diff --git a/dlls/winecoreaudio.drv/coreaudio.c b/dlls/winecoreaudio.drv/coreaudio.c
index f3af24f80fb..3ab80efc4e2 100644
--- a/dlls/winecoreaudio.drv/coreaudio.c
+++ b/dlls/winecoreaudio.drv/coreaudio.c
@@ -85,6 +85,30 @@ static HRESULT osstatus_to_hresult(OSStatus sc)
return E_FAIL;
}
+/* copied from kernelbase */
+static int muldiv( int a, int b, int c )
+{
+ LONGLONG ret;
+
+ if (!c) return -1;
+
+ /* We want to deal with a positive divisor to simplify the logic. */
+ if (c < 0)
+ {
+ a = -a;
+ c = -c;
+ }
+
+ /* If the result is positive, we "add" to round. else, we subtract to round. */
+ if ((a < 0 && b < 0) || (a >= 0 && b >= 0))
+ ret = (((LONGLONG)a * b) + (c / 2)) / c;
+ else
+ ret = (((LONGLONG)a * b) - (c / 2)) / c;
+
+ if (ret > 2147483647 || ret < -2147483647) return -1;
+ return ret;
+}
+
static AudioObjectPropertyScope get_scope(EDataFlow flow)
{
return (flow == eRender) ? kAudioDevicePropertyScopeOutput : kAudioDevicePropertyScopeInput;
@@ -241,7 +265,447 @@ static NTSTATUS get_endpoint_ids(void *args)
return STATUS_SUCCESS;
}
+static WAVEFORMATEX *clone_format(const WAVEFORMATEX *fmt)
+{
+ WAVEFORMATEX *ret;
+ size_t size;
+
+ if(fmt->wFormatTag == WAVE_FORMAT_EXTENSIBLE)
+ size = sizeof(WAVEFORMATEXTENSIBLE);
+ else
+ size = sizeof(WAVEFORMATEX);
+
+ ret = malloc(size);
+ if(!ret)
+ return NULL;
+
+ memcpy(ret, fmt, size);
+
+ ret->cbSize = size - sizeof(WAVEFORMATEX);
+
+ return ret;
+}
+
+static void silence_buffer(struct coreaudio_stream *stream, BYTE *buffer, UINT32 frames)
+{
+ WAVEFORMATEXTENSIBLE *fmtex = (WAVEFORMATEXTENSIBLE*)stream->fmt;
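+    /* unsigned 8-bit PCM is silent at 0x80; every other supported format is silent at zero */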
+ if((stream->fmt->wFormatTag == WAVE_FORMAT_PCM ||
+ (stream->fmt->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
+ IsEqualGUID(&fmtex->SubFormat, &KSDATAFORMAT_SUBTYPE_PCM))) &&
+ stream->fmt->wBitsPerSample == 8)
+ memset(buffer, 128, frames * stream->fmt->nBlockAlign);
+ else
+ memset(buffer, 0, frames * stream->fmt->nBlockAlign);
+}
+
+/* CA is pulling data from us */
+static OSStatus ca_render_cb(void *user, AudioUnitRenderActionFlags *flags,
+ const AudioTimeStamp *ts, UInt32 bus, UInt32 nframes,
+ AudioBufferList *data)
+{
+ struct coreaudio_stream *stream = user;
+ UINT32 to_copy_bytes, to_copy_frames, chunk_bytes, lcl_offs_bytes;
+
+ OSSpinLockLock(&stream->lock);
+
+ if(stream->playing){
+ lcl_offs_bytes = stream->lcl_offs_frames * stream->fmt->nBlockAlign;
+ to_copy_frames = min(nframes, stream->held_frames);
+ to_copy_bytes = to_copy_frames * stream->fmt->nBlockAlign;
+
+ chunk_bytes = (stream->bufsize_frames - stream->lcl_offs_frames) * stream->fmt->nBlockAlign;
+
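+        /* the held data may wrap past the end of the ring buffer, so copy in up to two chunks */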
+ if(to_copy_bytes > chunk_bytes){
+ memcpy(data->mBuffers[0].mData, stream->local_buffer + lcl_offs_bytes, chunk_bytes);
+ memcpy(((BYTE *)data->mBuffers[0].mData) + chunk_bytes, stream->local_buffer, to_copy_bytes - chunk_bytes);
+ }else
+ memcpy(data->mBuffers[0].mData, stream->local_buffer + lcl_offs_bytes, to_copy_bytes);
+
+ stream->lcl_offs_frames += to_copy_frames;
+ stream->lcl_offs_frames %= stream->bufsize_frames;
+ stream->held_frames -= to_copy_frames;
+ }else
+ to_copy_bytes = to_copy_frames = 0;
+
+ if(nframes > to_copy_frames)
+ silence_buffer(stream, ((BYTE *)data->mBuffers[0].mData) + to_copy_bytes, nframes - to_copy_frames);
+
+ OSSpinLockUnlock(&stream->lock);
+
+ return noErr;
+}
+
+static void ca_wrap_buffer(BYTE *dst, UINT32 dst_offs, UINT32 dst_bytes,
+ BYTE *src, UINT32 src_bytes)
+{
+ UINT32 chunk_bytes = dst_bytes - dst_offs;
+
+ if(chunk_bytes < src_bytes){
+ memcpy(dst + dst_offs, src, chunk_bytes);
+ memcpy(dst, src + chunk_bytes, src_bytes - chunk_bytes);
+ }else
+ memcpy(dst + dst_offs, src, src_bytes);
+}
+
+/* we need to trigger CA to pull data from the device and give it to us
+ *
+ * raw data from CA is stored in cap_buffer, possibly via wrap_buffer
+ *
+ * raw data is resampled from cap_buffer into resamp_buffer in period-size
+ * chunks and copied to local_buffer
+ */
+static OSStatus ca_capture_cb(void *user, AudioUnitRenderActionFlags *flags,
+ const AudioTimeStamp *ts, UInt32 bus, UInt32 nframes,
+ AudioBufferList *data)
+{
+ struct coreaudio_stream *stream = user;
+ AudioBufferList list;
+ OSStatus sc;
+ UINT32 cap_wri_offs_frames;
+
+ OSSpinLockLock(&stream->lock);
+
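+    /* the next write position follows the frames already held: read offset + held frames, modulo the ring size */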
+ cap_wri_offs_frames = (stream->cap_offs_frames + stream->cap_held_frames) % stream->cap_bufsize_frames;
+
+ list.mNumberBuffers = 1;
+ list.mBuffers[0].mNumberChannels = stream->fmt->nChannels;
+ list.mBuffers[0].mDataByteSize = nframes * stream->fmt->nBlockAlign;
+
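+    /* capture into the temporary wrap buffer when the stream is stopped or the write would run past the end of cap_buffer */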
+ if(!stream->playing || cap_wri_offs_frames + nframes > stream->cap_bufsize_frames){
+ if(stream->wrap_bufsize_frames < nframes){
+ free(stream->wrap_buffer);
+ stream->wrap_buffer = malloc(list.mBuffers[0].mDataByteSize);
+ stream->wrap_bufsize_frames = nframes;
+ }
+
+ list.mBuffers[0].mData = stream->wrap_buffer;
+ }else
+ list.mBuffers[0].mData = stream->cap_buffer + cap_wri_offs_frames * stream->fmt->nBlockAlign;
+
+ sc = AudioUnitRender(stream->unit, flags, ts, bus, nframes, &list);
+ if(sc != noErr){
+ OSSpinLockUnlock(&stream->lock);
+ return sc;
+ }
+
+ if(stream->playing){
+ if(list.mBuffers[0].mData == stream->wrap_buffer){
+ ca_wrap_buffer(stream->cap_buffer,
+ cap_wri_offs_frames * stream->fmt->nBlockAlign,
+ stream->cap_bufsize_frames * stream->fmt->nBlockAlign,
+ stream->wrap_buffer, list.mBuffers[0].mDataByteSize);
+ }
+
+ stream->cap_held_frames += list.mBuffers[0].mDataByteSize / stream->fmt->nBlockAlign;
+ if(stream->cap_held_frames > stream->cap_bufsize_frames){
+ stream->cap_offs_frames += stream->cap_held_frames % stream->cap_bufsize_frames;
+ stream->cap_offs_frames %= stream->cap_bufsize_frames;
+ stream->cap_held_frames = stream->cap_bufsize_frames;
+ }
+ }
+
+ OSSpinLockUnlock(&stream->lock);
+ return noErr;
+}
+
+static AudioComponentInstance get_audiounit(EDataFlow dataflow, AudioDeviceID adevid)
+{
+ AudioComponentInstance unit;
+ AudioComponent comp;
+ AudioComponentDescription desc;
+ OSStatus sc;
+
+ memset(&desc, 0, sizeof(desc));
+ desc.componentType = kAudioUnitType_Output;
+ desc.componentSubType = kAudioUnitSubType_HALOutput;
+ desc.componentManufacturer = kAudioUnitManufacturer_Apple;
+
+ if(!(comp = AudioComponentFindNext(NULL, &desc))){
+ WARN("AudioComponentFindNext failed\n");
+ return NULL;
+ }
+
+ sc = AudioComponentInstanceNew(comp, &unit);
+ if(sc != noErr){
+ WARN("AudioComponentInstanceNew failed: %x\n", (int)sc);
+ return NULL;
+ }
+
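+    /* for capture, enable I/O on the input element (1) and disable it on the output element (0) */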
+ if(dataflow == eCapture){
+ UInt32 enableio;
+
+ enableio = 1;
+ sc = AudioUnitSetProperty(unit, kAudioOutputUnitProperty_EnableIO,
+ kAudioUnitScope_Input, 1, &enableio, sizeof(enableio));
+ if(sc != noErr){
+ WARN("Couldn't enable I/O on input element: %x\n", (int)sc);
+ AudioComponentInstanceDispose(unit);
+ return NULL;
+ }
+
+ enableio = 0;
+ sc = AudioUnitSetProperty(unit, kAudioOutputUnitProperty_EnableIO,
+ kAudioUnitScope_Output, 0, &enableio, sizeof(enableio));
+ if(sc != noErr){
+ WARN("Couldn't disable I/O on output element: %x\n", (int)sc);
+ AudioComponentInstanceDispose(unit);
+ return NULL;
+ }
+ }
+
+ sc = AudioUnitSetProperty(unit, kAudioOutputUnitProperty_CurrentDevice,
+ kAudioUnitScope_Global, 0, &adevid, sizeof(adevid));
+ if(sc != noErr){
+ WARN("Couldn't set audio unit device\n");
+ AudioComponentInstanceDispose(unit);
+ return NULL;
+ }
+
+ return unit;
+}
+
+static void dump_adesc(const char *aux, AudioStreamBasicDescription *desc)
+{
+ TRACE("%s: mSampleRate: %f\n", aux, desc->mSampleRate);
+ TRACE("%s: mBytesPerPacket: %u\n", aux, (unsigned int)desc->mBytesPerPacket);
+ TRACE("%s: mFramesPerPacket: %u\n", aux, (unsigned int)desc->mFramesPerPacket);
+ TRACE("%s: mBytesPerFrame: %u\n", aux, (unsigned int)desc->mBytesPerFrame);
+ TRACE("%s: mChannelsPerFrame: %u\n", aux, (unsigned int)desc->mChannelsPerFrame);
+ TRACE("%s: mBitsPerChannel: %u\n", aux, (unsigned int)desc->mBitsPerChannel);
+}
+
+static HRESULT ca_get_audiodesc(AudioStreamBasicDescription *desc,
+ const WAVEFORMATEX *fmt)
+{
+ const WAVEFORMATEXTENSIBLE *fmtex = (const WAVEFORMATEXTENSIBLE *)fmt;
+
+ desc->mFormatFlags = 0;
+
+ if(fmt->wFormatTag == WAVE_FORMAT_PCM ||
+ (fmt->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
+ IsEqualGUID(&fmtex->SubFormat, &KSDATAFORMAT_SUBTYPE_PCM))){
+ desc->mFormatID = kAudioFormatLinearPCM;
+ if(fmt->wBitsPerSample > 8)
+ desc->mFormatFlags = kAudioFormatFlagIsSignedInteger;
+ }else if(fmt->wFormatTag == WAVE_FORMAT_IEEE_FLOAT ||
+ (fmt->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
+ IsEqualGUID(&fmtex->SubFormat, &KSDATAFORMAT_SUBTYPE_IEEE_FLOAT))){
+ desc->mFormatID = kAudioFormatLinearPCM;
+ desc->mFormatFlags = kAudioFormatFlagIsFloat;
+ }else if(fmt->wFormatTag == WAVE_FORMAT_MULAW ||
+ (fmt->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
+ IsEqualGUID(&fmtex->SubFormat, &KSDATAFORMAT_SUBTYPE_MULAW))){
+ desc->mFormatID = kAudioFormatULaw;
+ }else if(fmt->wFormatTag == WAVE_FORMAT_ALAW ||
+ (fmt->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
+ IsEqualGUID(&fmtex->SubFormat, &KSDATAFORMAT_SUBTYPE_ALAW))){
+ desc->mFormatID = kAudioFormatALaw;
+ }else
+ return AUDCLNT_E_UNSUPPORTED_FORMAT;
+
+ desc->mSampleRate = fmt->nSamplesPerSec;
+ desc->mBytesPerPacket = fmt->nBlockAlign;
+ desc->mFramesPerPacket = 1;
+ desc->mBytesPerFrame = fmt->nBlockAlign;
+ desc->mChannelsPerFrame = fmt->nChannels;
+ desc->mBitsPerChannel = fmt->wBitsPerSample;
+ desc->mReserved = 0;
+
+ return S_OK;
+}
+
+static HRESULT ca_setup_audiounit(EDataFlow dataflow, AudioComponentInstance unit,
+ const WAVEFORMATEX *fmt, AudioStreamBasicDescription *dev_desc,
+ AudioConverterRef *converter)
+{
+ OSStatus sc;
+ HRESULT hr;
+
+ if(dataflow == eCapture){
+ AudioStreamBasicDescription desc;
+ UInt32 size;
+ Float64 rate;
+ fenv_t fenv;
+ BOOL fenv_stored = TRUE;
+
+ hr = ca_get_audiodesc(&desc, fmt);
+ if(FAILED(hr))
+ return hr;
+ dump_adesc("requested", &desc);
+
+ /* input-only units can't perform sample rate conversion, so we have to
+ * set up our own AudioConverter to support arbitrary sample rates. */
+ size = sizeof(*dev_desc);
+ sc = AudioUnitGetProperty(unit, kAudioUnitProperty_StreamFormat,
+ kAudioUnitScope_Input, 1, dev_desc, &size);
+ if(sc != noErr){
+ WARN("Couldn't get unit format: %x\n", (int)sc);
+ return osstatus_to_hresult(sc);
+ }
+ dump_adesc("hardware", dev_desc);
+
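+        /* keep the device's native sample rate; the AudioConverter created below bridges it to the client format */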
+ rate = dev_desc->mSampleRate;
+ *dev_desc = desc;
+ dev_desc->mSampleRate = rate;
+
+ dump_adesc("final", dev_desc);
+ sc = AudioUnitSetProperty(unit, kAudioUnitProperty_StreamFormat,
+ kAudioUnitScope_Output, 1, dev_desc, sizeof(*dev_desc));
+ if(sc != noErr){
+ WARN("Couldn't set unit format: %x\n", (int)sc);
+ return osstatus_to_hresult(sc);
+ }
+
+ /* AudioConverterNew requires divide-by-zero SSE exceptions to be masked */
+ if(feholdexcept(&fenv)){
+ WARN("Failed to store fenv state\n");
+ fenv_stored = FALSE;
+ }
+
+ sc = AudioConverterNew(dev_desc, &desc, converter);
+
+ if(fenv_stored && fesetenv(&fenv))
+ WARN("Failed to restore fenv state\n");
+
+ if(sc != noErr){
+ WARN("Couldn't create audio converter: %x\n", (int)sc);
+ return osstatus_to_hresult(sc);
+ }
+ }else{
+ hr = ca_get_audiodesc(dev_desc, fmt);
+ if(FAILED(hr))
+ return hr;
+
+ dump_adesc("final", dev_desc);
+ sc = AudioUnitSetProperty(unit, kAudioUnitProperty_StreamFormat,
+ kAudioUnitScope_Input, 0, dev_desc, sizeof(*dev_desc));
+ if(sc != noErr){
+ WARN("Couldn't set format: %x\n", (int)sc);
+ return osstatus_to_hresult(sc);
+ }
+ }
+
+ return S_OK;
+}
+
+static NTSTATUS create_stream(void *args)
+{
+ struct create_stream_params *params = args;
+ struct coreaudio_stream *stream = calloc(1, sizeof(*stream));
+ AURenderCallbackStruct input;
+ OSStatus sc;
+
+ if(!stream){
+ params->result = E_OUTOFMEMORY;
+ return STATUS_SUCCESS;
+ }
+
+ stream->fmt = clone_format(params->fmt);
+ if(!stream->fmt){
+ params->result = E_OUTOFMEMORY;
+ goto end;
+ }
+
+ stream->period_ms = params->period / 10000;
+ stream->period_frames = muldiv(params->period, stream->fmt->nSamplesPerSec, 10000000);
+ stream->dev_id = params->dev_id;
+ stream->flow = params->flow;
+ stream->share = params->share;
+
+ stream->bufsize_frames = muldiv(params->duration, stream->fmt->nSamplesPerSec, 10000000);
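+    /* round exclusive-mode buffers down to a whole number of periods */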
+ if(params->share == AUDCLNT_SHAREMODE_EXCLUSIVE)
+ stream->bufsize_frames -= stream->bufsize_frames % stream->period_frames;
+
+ if(!(stream->unit = get_audiounit(stream->flow, stream->dev_id))){
+ params->result = AUDCLNT_E_DEVICE_INVALIDATED;
+ goto end;
+ }
+
+ params->result = ca_setup_audiounit(stream->flow, stream->unit, stream->fmt, &stream->dev_desc, &stream->converter);
+ if(FAILED(params->result)) goto end;
+
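+    /* render units pull data from us via ca_render_cb; for capture, ca_capture_cb is notified and pulls from the device with AudioUnitRender() */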
+ input.inputProcRefCon = stream;
+ if(stream->flow == eCapture){
+ input.inputProc = ca_capture_cb;
+ sc = AudioUnitSetProperty(stream->unit, kAudioOutputUnitProperty_SetInputCallback,
+ kAudioUnitScope_Output, 1, &input, sizeof(input));
+ }else{
+ input.inputProc = ca_render_cb;
+ sc = AudioUnitSetProperty(stream->unit, kAudioUnitProperty_SetRenderCallback,
+ kAudioUnitScope_Input, 0, &input, sizeof(input));
+ }
+ if(sc != noErr){
+ WARN("Couldn't set callback: %x\n", (int)sc);
+ params->result = osstatus_to_hresult(sc);
+ goto end;
+ }
+
+ sc = AudioUnitInitialize(stream->unit);
+ if(sc != noErr){
+ WARN("Couldn't initialize: %x\n", (int)sc);
+ params->result = osstatus_to_hresult(sc);
+ goto end;
+ }
+
+ /* we play audio continuously because AudioOutputUnitStart sometimes takes
+ * a while to return */
+ sc = AudioOutputUnitStart(stream->unit);
+ if(sc != noErr){
+ WARN("Unit failed to start: %x\n", (int)sc);
+ params->result = osstatus_to_hresult(sc);
+ goto end;
+ }
+
+ stream->local_buffer_size = stream->bufsize_frames * stream->fmt->nBlockAlign;
+ if(NtAllocateVirtualMemory(GetCurrentProcess(), (void **)&stream->local_buffer, 0, &stream->local_buffer_size,
+ MEM_COMMIT, PAGE_READWRITE)){
+ params->result = E_OUTOFMEMORY;
+ goto end;
+ }
+ silence_buffer(stream, stream->local_buffer, stream->bufsize_frames);
+
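+    /* the raw capture buffer is sized for the device's native rate; frames are resampled to the client format later */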
+ if(stream->flow == eCapture){
+ stream->cap_bufsize_frames = muldiv(params->duration, stream->dev_desc.mSampleRate, 10000000);
+ stream->cap_buffer = malloc(stream->cap_bufsize_frames * stream->fmt->nBlockAlign);
+ }
+ params->result = S_OK;
+
+end:
+ if(FAILED(params->result)){
+ if(stream->converter) AudioConverterDispose(stream->converter);
+ if(stream->unit) AudioComponentInstanceDispose(stream->unit);
+ free(stream->fmt);
+ free(stream);
+ } else
+ params->stream = stream;
+
+ return STATUS_SUCCESS;
+}
+
+static NTSTATUS release_stream( void *args )
+{
+ struct release_stream_params *params = args;
+ struct coreaudio_stream *stream = params->stream;
+
+ if(stream->unit){
+ AudioOutputUnitStop(stream->unit);
+ AudioComponentInstanceDispose(stream->unit);
+ }
+
+ if(stream->converter) AudioConverterDispose(stream->converter);
+ free(stream->wrap_buffer);
+ free(stream->cap_buffer);
+ if(stream->local_buffer)
+ NtFreeVirtualMemory(GetCurrentProcess(), (void **)&stream->local_buffer,
+ &stream->local_buffer_size, MEM_RELEASE);
+ free(stream->fmt);
+ params->result = S_OK;
+ return STATUS_SUCCESS;
+}
+
unixlib_entry_t __wine_unix_call_funcs[] =
{
get_endpoint_ids,
+ create_stream,
+ release_stream,
};
diff --git a/dlls/winecoreaudio.drv/mmdevdrv.c b/dlls/winecoreaudio.drv/mmdevdrv.c
index 0abfebfb1a5..572126562c6 100644
--- a/dlls/winecoreaudio.drv/mmdevdrv.c
+++ b/dlls/winecoreaudio.drv/mmdevdrv.c
@@ -138,6 +138,10 @@ struct ACImpl {
struct coreaudio_stream *stream;
struct list entry;
+
+ /* Temporary */
+ BYTE *feed_wrap_buffer;
+ UINT32 feed_wrap_bufsize_frames;
};
static const IAudioClient3Vtbl AudioClient3_Vtbl;
@@ -613,7 +617,9 @@ static ULONG WINAPI AudioClient_AddRef(IAudioClient3 *iface)
static ULONG WINAPI AudioClient_Release(IAudioClient3 *iface)
{
ACImpl *This = impl_from_IAudioClient3(iface);
+ struct release_stream_params params;
ULONG ref;
+
ref = InterlockedDecrement(&This->ref);
TRACE("(%p) Refcount now %u\n", This, ref);
if(!ref){
@@ -627,22 +633,13 @@ static ULONG WINAPI AudioClient_Release(IAudioClient3 *iface)
WaitForSingleObject(event, INFINITE);
CloseHandle(event);
}
- if (This->stream){
- AudioOutputUnitStop(This->stream->unit);
- AudioComponentInstanceDispose(This->stream->unit);
- if(This->stream->converter)
- AudioConverterDispose(This->stream->converter);
- HeapFree(GetProcessHeap(), 0, This->stream->cap_buffer);
- if(This->stream->local_buffer)
- NtFreeVirtualMemory(GetCurrentProcess(), (void **)&This->stream->local_buffer,
- &This->stream->local_buffer_size, MEM_RELEASE);
+ if(This->stream){
if(This->stream->tmp_buffer)
NtFreeVirtualMemory(GetCurrentProcess(), (void **)&This->stream->tmp_buffer,
&This->stream->tmp_buffer_size, MEM_RELEASE);
- free(This->stream->wrap_buffer);
HeapFree(GetProcessHeap(), 0, This->stream->resamp_buffer);
- CoTaskMemFree(This->stream->fmt);
- HeapFree(GetProcessHeap(), 0, This->stream);
+ params.stream = This->stream;
+        UNIX_CALL(release_stream, &params);
}
if(This->session){
EnterCriticalSection(&g_sessions_lock);
@@ -650,6 +647,7 @@ static ULONG WINAPI AudioClient_Release(IAudioClient3 *iface)
LeaveCriticalSection(&g_sessions_lock);
}
HeapFree(GetProcessHeap(), 0, This->vols);
+ free(This->feed_wrap_buffer);
IMMDevice_Release(This->parent);
IUnknown_Release(This->pUnkFTMarshal);
HeapFree(GetProcessHeap(), 0, This);
@@ -717,27 +715,6 @@ static DWORD get_channel_mask(unsigned int channels)
return 0;
}
-static WAVEFORMATEX *clone_format(const WAVEFORMATEX *fmt)
-{
- WAVEFORMATEX *ret;
- size_t size;
-
- if(fmt->wFormatTag == WAVE_FORMAT_EXTENSIBLE)
- size = sizeof(WAVEFORMATEXTENSIBLE);
- else
- size = sizeof(WAVEFORMATEX);
-
- ret = CoTaskMemAlloc(size);
- if(!ret)
- return NULL;
-
- memcpy(ret, fmt, size);
-
- ret->cbSize = size - sizeof(WAVEFORMATEX);
-
- return ret;
-}
-
static HRESULT ca_get_audiodesc(AudioStreamBasicDescription *desc,
const WAVEFORMATEX *fmt)
{
@@ -881,43 +858,6 @@ static void silence_buffer(struct coreaudio_stream *stream, BYTE *buffer, UINT32
memset(buffer, 0, frames * stream->fmt->nBlockAlign);
}
-/* CA is pulling data from us */
-static OSStatus ca_render_cb(void *user, AudioUnitRenderActionFlags *flags,
- const AudioTimeStamp *ts, UInt32 bus, UInt32 nframes,
- AudioBufferList *data)
-{
- ACImpl *This = user;
- UINT32 to_copy_bytes, to_copy_frames, chunk_bytes, lcl_offs_bytes;
-
- OSSpinLockLock(&This->stream->lock);
-
- if(This->stream->playing){
- lcl_offs_bytes = This->stream->lcl_offs_frames * This->stream->fmt->nBlockAlign;
- to_copy_frames = min(nframes, This->stream->held_frames);
- to_copy_bytes = to_copy_frames * This->stream->fmt->nBlockAlign;
-
- chunk_bytes = (This->stream->bufsize_frames - This->stream->lcl_offs_frames) * This->stream->fmt->nBlockAlign;
-
- if(to_copy_bytes > chunk_bytes){
- memcpy(data->mBuffers[0].mData, This->stream->local_buffer + lcl_offs_bytes, chunk_bytes);
- memcpy(((BYTE *)data->mBuffers[0].mData) + chunk_bytes, This->stream->local_buffer, to_copy_bytes - chunk_bytes);
- }else
- memcpy(data->mBuffers[0].mData, This->stream->local_buffer + lcl_offs_bytes, to_copy_bytes);
-
- This->stream->lcl_offs_frames += to_copy_frames;
- This->stream->lcl_offs_frames %= This->stream->bufsize_frames;
- This->stream->held_frames -= to_copy_frames;
- }else
- to_copy_bytes = to_copy_frames = 0;
-
- if(nframes > to_copy_frames)
- silence_buffer(This->stream, ((BYTE *)data->mBuffers[0].mData) + to_copy_bytes, nframes - to_copy_frames);
-
- OSSpinLockUnlock(&This->stream->lock);
-
- return noErr;
-}
-
static UINT buf_ptr_diff(UINT left, UINT right, UINT bufsize)
{
if(left <= right)
@@ -945,18 +885,18 @@ static OSStatus feed_cb(AudioConverterRef converter, UInt32 *nframes, AudioBuffe
if(This->stream->cap_offs_frames + *nframes > This->stream->cap_bufsize_frames){
UINT32 chunk_frames = This->stream->cap_bufsize_frames - This->stream->cap_offs_frames;
- if(This->stream->wrap_bufsize_frames < *nframes){
- free(This->stream->wrap_buffer);
- This->stream->wrap_buffer = malloc(data->mBuffers[0].mDataByteSize);
- This->stream->wrap_bufsize_frames = *nframes;
+ if(This->feed_wrap_bufsize_frames < *nframes){
+ free(This->feed_wrap_buffer);
+ This->feed_wrap_buffer = malloc(data->mBuffers[0].mDataByteSize);
+ This->feed_wrap_bufsize_frames = *nframes;
}
- memcpy(This->stream->wrap_buffer, This->stream->cap_buffer + This->stream->cap_offs_frames * This->stream->fmt->nBlockAlign,
+ memcpy(This->feed_wrap_buffer, This->stream->cap_buffer + This->stream->cap_offs_frames * This->stream->fmt->nBlockAlign,
chunk_frames * This->stream->fmt->nBlockAlign);
- memcpy(This->stream->wrap_buffer + chunk_frames * This->stream->fmt->nBlockAlign, This->stream->cap_buffer,
+ memcpy(This->feed_wrap_buffer + chunk_frames * This->stream->fmt->nBlockAlign, This->stream->cap_buffer,
(*nframes - chunk_frames) * This->stream->fmt->nBlockAlign);
- data->mBuffers[0].mData = This->stream->wrap_buffer;
+ data->mBuffers[0].mData = This->feed_wrap_buffer;
}else
data->mBuffers[0].mData = This->stream->cap_buffer + This->stream->cap_offs_frames * This->stream->fmt->nBlockAlign;
@@ -1017,67 +957,6 @@ static void capture_resample(ACImpl *This)
}
}
-/* we need to trigger CA to pull data from the device and give it to us
- *
- * raw data from CA is stored in cap_buffer, possibly via wrap_buffer
- *
- * raw data is resampled from cap_buffer into resamp_buffer in period-size
- * chunks and copied to local_buffer
- */
-static OSStatus ca_capture_cb(void *user, AudioUnitRenderActionFlags *flags,
- const AudioTimeStamp *ts, UInt32 bus, UInt32 nframes,
- AudioBufferList *data)
-{
- ACImpl *This = user;
- AudioBufferList list;
- OSStatus sc;
- UINT32 cap_wri_offs_frames;
-
- OSSpinLockLock(&This->stream->lock);
-
- cap_wri_offs_frames = (This->stream->cap_offs_frames + This->stream->cap_held_frames) % This->stream->cap_bufsize_frames;
-
- list.mNumberBuffers = 1;
- list.mBuffers[0].mNumberChannels = This->stream->fmt->nChannels;
- list.mBuffers[0].mDataByteSize = nframes * This->stream->fmt->nBlockAlign;
-
- if(!This->stream->playing || cap_wri_offs_frames + nframes > This->stream->cap_bufsize_frames){
- if(This->stream->wrap_bufsize_frames < nframes){
- free(This->stream->wrap_buffer);
- This->stream->wrap_buffer = malloc(list.mBuffers[0].mDataByteSize);
- This->stream->wrap_bufsize_frames = nframes;
- }
-
- list.mBuffers[0].mData = This->stream->wrap_buffer;
- }else
- list.mBuffers[0].mData = This->stream->cap_buffer + cap_wri_offs_frames * This->stream->fmt->nBlockAlign;
-
- sc = AudioUnitRender(This->stream->unit, flags, ts, bus, nframes, &list);
- if(sc != noErr){
- OSSpinLockUnlock(&This->stream->lock);
- return sc;
- }
-
- if(This->stream->playing){
- if(list.mBuffers[0].mData == This->stream->wrap_buffer){
- ca_wrap_buffer(This->stream->cap_buffer,
- cap_wri_offs_frames * This->stream->fmt->nBlockAlign,
- This->stream->cap_bufsize_frames * This->stream->fmt->nBlockAlign,
- This->stream->wrap_buffer, list.mBuffers[0].mDataByteSize);
- }
-
- This->stream->cap_held_frames += list.mBuffers[0].mDataByteSize / This->stream->fmt->nBlockAlign;
- if(This->stream->cap_held_frames > This->stream->cap_bufsize_frames){
- This->stream->cap_offs_frames += This->stream->cap_held_frames % This->stream->cap_bufsize_frames;
- This->stream->cap_offs_frames %= This->stream->cap_bufsize_frames;
- This->stream->cap_held_frames = This->stream->cap_bufsize_frames;
- }
- }
-
- OSSpinLockUnlock(&This->stream->lock);
- return noErr;
-}
-
static void dump_adesc(const char *aux, AudioStreamBasicDescription *desc)
{
TRACE("%s: mSampleRate: %f\n", aux, desc->mSampleRate);
@@ -1168,9 +1047,8 @@ static HRESULT WINAPI AudioClient_Initialize(IAudioClient3 *iface,
const GUID *sessionguid)
{
ACImpl *This = impl_from_IAudioClient3(iface);
- struct coreaudio_stream *stream;
- HRESULT hr;
- OSStatus sc;
+ struct release_stream_params release_params;
+ struct create_stream_params params;
UINT32 i;
TRACE("(%p)->(%x, %x, %s, %s, %p, %s)\n", This, mode, flags,
@@ -1233,146 +1111,50 @@ static HRESULT WINAPI AudioClient_Initialize(IAudioClient3 *iface,
return AUDCLNT_E_ALREADY_INITIALIZED;
}
- stream = HeapAlloc(GetProcessHeap(), HEAP_ZERO_MEMORY, sizeof(*stream));
- if(!stream){
- LeaveCriticalSection(&g_sessions_lock);
- return E_OUTOFMEMORY;
- }
-
- stream->fmt = clone_format(fmt);
- if(!stream->fmt){
- HeapFree(GetProcessHeap(), 0, stream);
- LeaveCriticalSection(&g_sessions_lock);
- return E_OUTOFMEMORY;
- }
-
- stream->period_ms = period / 10000;
- stream->period_frames = MulDiv(period, stream->fmt->nSamplesPerSec, 10000000);
-
- stream->bufsize_frames = MulDiv(duration, fmt->nSamplesPerSec, 10000000);
- if(mode == AUDCLNT_SHAREMODE_EXCLUSIVE)
- stream->bufsize_frames -= stream->bufsize_frames % stream->period_frames;
-
- if(!(stream->unit = get_audiounit(This->dataflow, This->adevid))){
- CoTaskMemFree(stream->fmt);
- HeapFree(GetProcessHeap(), 0, stream);
- LeaveCriticalSection(&g_sessions_lock);
- return AUDCLNT_E_DEVICE_INVALIDATED;
- }
-
- hr = ca_setup_audiounit(This->dataflow, stream->unit, stream->fmt, &stream->dev_desc, &stream->converter);
- if(FAILED(hr)){
- CoTaskMemFree(stream->fmt);
- HeapFree(GetProcessHeap(), 0, stream);
- LeaveCriticalSection(&g_sessions_lock);
- return hr;
- }
-
- if(This->dataflow == eCapture){
- AURenderCallbackStruct input;
-
- memset(&input, 0, sizeof(input));
- input.inputProc = &ca_capture_cb;
- input.inputProcRefCon = This;
-
- sc = AudioUnitSetProperty(stream->unit, kAudioOutputUnitProperty_SetInputCallback,
- kAudioUnitScope_Output, 1, &input, sizeof(input));
- if(sc != noErr){
- WARN("Couldn't set callback: %x\n", (int)sc);
- AudioConverterDispose(stream->converter);
- CoTaskMemFree(stream->fmt);
- HeapFree(GetProcessHeap(), 0, stream);
- LeaveCriticalSection(&g_sessions_lock);
- return osstatus_to_hresult(sc);
- }
- }else{
- AURenderCallbackStruct input;
-
- memset(&input, 0, sizeof(input));
- input.inputProc = &ca_render_cb;
- input.inputProcRefCon = This;
-
- sc = AudioUnitSetProperty(stream->unit, kAudioUnitProperty_SetRenderCallback,
- kAudioUnitScope_Input, 0, &input, sizeof(input));
- if(sc != noErr){
- WARN("Couldn't set callback: %x\n", (int)sc);
- CoTaskMemFree(stream->fmt);
- HeapFree(GetProcessHeap(), 0, stream);
- LeaveCriticalSection(&g_sessions_lock);
- return osstatus_to_hresult(sc);
- }
- }
+ params.dev_id = This->adevid;
+ params.flow = This->dataflow;
+ params.share = mode;
+ params.duration = duration;
+ params.period = period;
+ params.fmt = fmt;
- sc = AudioUnitInitialize(stream->unit);
- if(sc != noErr){
- WARN("Couldn't initialize: %x\n", (int)sc);
- if(stream->converter)
- AudioConverterDispose(stream->converter);
- CoTaskMemFree(stream->fmt);
- HeapFree(GetProcessHeap(), 0, stream);
- LeaveCriticalSection(&g_sessions_lock);
- return osstatus_to_hresult(sc);
- }
-
- /* we play audio continuously because AudioOutputUnitStart sometimes takes
- * a while to return */
- sc = AudioOutputUnitStart(stream->unit);
- if(sc != noErr){
- WARN("Unit failed to start: %x\n", (int)sc);
- if(stream->converter)
- AudioConverterDispose(stream->converter);
- CoTaskMemFree(stream->fmt);
- HeapFree(GetProcessHeap(), 0, stream);
- LeaveCriticalSection(&g_sessions_lock);
- return osstatus_to_hresult(sc);
- }
-
- stream->local_buffer_size = stream->bufsize_frames * fmt->nBlockAlign;
- NtAllocateVirtualMemory(GetCurrentProcess(), (void **)&stream->local_buffer, 0,
- &stream->local_buffer_size, MEM_COMMIT, PAGE_READWRITE);
- silence_buffer(stream, stream->local_buffer, stream->bufsize_frames);
-
- if(This->dataflow == eCapture){
- stream->cap_bufsize_frames = MulDiv(duration, stream->dev_desc.mSampleRate, 10000000);
- stream->cap_buffer = HeapAlloc(GetProcessHeap(), 0, stream->cap_bufsize_frames * stream->fmt->nBlockAlign);
- }
+    UNIX_CALL(create_stream, &params);
+ if(FAILED(params.result)) goto end;
- stream->share = mode;
This->flags = flags;
This->channel_count = fmt->nChannels;
This->period_ms = period / 10000;
This->vols = HeapAlloc(GetProcessHeap(), 0, This->channel_count * sizeof(float));
if(!This->vols){
- CoTaskMemFree(stream->fmt);
- HeapFree(GetProcessHeap(), 0, stream);
- LeaveCriticalSection(&g_sessions_lock);
- return E_OUTOFMEMORY;
+ params.result = E_OUTOFMEMORY;
+ goto end;
}
for(i = 0; i < This->channel_count; ++i)
This->vols[i] = 1.f;
- hr = get_audio_session(sessionguid, This->parent, fmt->nChannels,
- &This->session);
- if(FAILED(hr)){
- CoTaskMemFree(stream->fmt);
- HeapFree(GetProcessHeap(), 0, stream);
- HeapFree(GetProcessHeap(), 0, This->vols);
- This->vols = NULL;
- LeaveCriticalSection(&g_sessions_lock);
- return E_INVALIDARG;
- }
+ params.result = get_audio_session(sessionguid, This->parent, fmt->nChannels, &This->session);
+ if(FAILED(params.result)) goto end;
list_add_tail(&This->session->clients, &This->entry);
- ca_setvol(This, stream, -1);
+ ca_setvol(This, params.stream, -1);
- This->stream = stream;
+end:
+ if(FAILED(params.result)){
+ if(params.stream){
+            release_params.stream = params.stream;
+ UNIX_CALL(release_stream, &release_params);
+ }
+ HeapFree(GetProcessHeap(), 0, This->vols);
+ This->vols = NULL;
+ }else
+ This->stream = params.stream;
LeaveCriticalSection(&g_sessions_lock);
- return S_OK;
+ return params.result;
}
static HRESULT WINAPI AudioClient_GetBufferSize(IAudioClient3 *iface,
diff --git a/dlls/winecoreaudio.drv/unixlib.h b/dlls/winecoreaudio.drv/unixlib.h
index 1b773b7f820..7ebdc0b7786 100644
--- a/dlls/winecoreaudio.drv/unixlib.h
+++ b/dlls/winecoreaudio.drv/unixlib.h
@@ -57,9 +57,29 @@ struct get_endpoint_ids_params
unsigned int default_idx;
};
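+/* dev_id through fmt are inputs from the PE side; result and stream are filled in by the unix side */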
+struct create_stream_params
+{
+ DWORD dev_id;
+ EDataFlow flow;
+ AUDCLNT_SHAREMODE share;
+ REFERENCE_TIME duration;
+ REFERENCE_TIME period;
+ const WAVEFORMATEX *fmt;
+ HRESULT result;
+ struct coreaudio_stream *stream;
+};
+
+struct release_stream_params
+{
+ struct coreaudio_stream *stream;
+ HRESULT result;
+};
+
enum unix_funcs
{
unix_get_endpoint_ids,
+ unix_create_stream,
+ unix_release_stream,
};
extern unixlib_handle_t coreaudio_handle;
--
2.23.0