From: Shaun Ren <sren@codeweavers.com>
---
 dlls/winegstreamer/Makefile.in        |   1 +
 dlls/winegstreamer/video_decoder.c    | 733 ++++++++++++++++++++++++++
 dlls/winegstreamer/winegstreamer.spec |   1 +
 3 files changed, 735 insertions(+)
 create mode 100644 dlls/winegstreamer/video_decoder.c

diff --git a/dlls/winegstreamer/Makefile.in b/dlls/winegstreamer/Makefile.in
index 064a8b68343..82b1c148d6b 100644
--- a/dlls/winegstreamer/Makefile.in
+++ b/dlls/winegstreamer/Makefile.in
@@ -16,6 +16,7 @@ C_SRCS = \
 	quartz_parser.c \
 	quartz_transform.c \
 	resampler.c \
+	video_decoder.c \
 	video_processor.c \
 	wg_allocator.c \
 	wg_format.c \
diff --git a/dlls/winegstreamer/video_decoder.c b/dlls/winegstreamer/video_decoder.c
new file mode 100644
index 00000000000..51ced8ebd34
--- /dev/null
+++ b/dlls/winegstreamer/video_decoder.c
@@ -0,0 +1,733 @@
+/* Generic Video Decoder Transform
+ *
+ * Copyright 2022 Rémi Bernon for CodeWeavers
+ * Copyright 2023 Shaun Ren for CodeWeavers
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
+ */
+
+#include "gst_private.h"
+
+#include "mfapi.h"
+#include "mferror.h"
+#include "mfobjects.h"
+#include "mftransform.h"
+
+#include "wine/debug.h"
+
+WINE_DEFAULT_DEBUG_CHANNEL(mfplat);
+
+DEFINE_MEDIATYPE_GUID(MFVideoFormat_IV50, MAKEFOURCC('I','V','5','0'));
+
+static const GUID *const input_types[] =
+{
+    &MFVideoFormat_H264,
+    &MFVideoFormat_IV50,
+};
+static const GUID *const output_types[] =
+{
+    &MFVideoFormat_YV12,
+    &MFVideoFormat_YUY2,
+    &MFVideoFormat_NV11,
+    &MFVideoFormat_NV12,
+    &MFVideoFormat_RGB32,
+    &MFVideoFormat_RGB24,
+    &MFVideoFormat_RGB565,
+    &MFVideoFormat_RGB555,
+    &MFVideoFormat_RGB8,
+};
+
+struct video_decoder
+{
+    IMFTransform IMFTransform_iface;
+    LONG refcount;
+
+    IMFAttributes *attributes;
+    IMFAttributes *output_attributes;
+
+    IMFMediaType *input_type;
+    MFT_INPUT_STREAM_INFO input_info;
+    IMFMediaType *output_type;
+    MFT_OUTPUT_STREAM_INFO output_info;
+
+    struct wg_format wg_format;
+    struct wg_transform *wg_transform;
+    struct wg_sample_queue *wg_sample_queue;
+};
+
+static struct video_decoder *impl_from_IMFTransform(IMFTransform *iface)
+{
+    return CONTAINING_RECORD(iface, struct video_decoder, IMFTransform_iface);
+}
+
+static HRESULT try_create_wg_transform(struct video_decoder *decoder)
+{
+    struct wg_format input_format;
+    struct wg_format output_format;
+
+    if (decoder->wg_transform)
+        wg_transform_destroy(decoder->wg_transform);
+    decoder->wg_transform = NULL;
+
+    mf_media_type_to_wg_format(decoder->input_type, &input_format);
+    if (input_format.major_type == WG_MAJOR_TYPE_UNKNOWN)
+        return MF_E_INVALIDMEDIATYPE;
+
+    mf_media_type_to_wg_format(decoder->output_type, &output_format);
+    if (output_format.major_type == WG_MAJOR_TYPE_UNKNOWN)
+        return MF_E_INVALIDMEDIATYPE;
+
+    /* Don't force any specific size, the video stream already has the metadata for it
+     * and will generate a MF_E_TRANSFORM_STREAM_CHANGE result later.
+     */
+    output_format.u.video.width = 0;
+    output_format.u.video.height = 0;
+    output_format.u.video.fps_d = 0;
+    output_format.u.video.fps_n = 0;
+
+    if (!(decoder->wg_transform = wg_transform_create(&input_format, &output_format)))
+    {
+        ERR("Failed to create transform with input major_type %u.\n", input_format.major_type);
+        return E_FAIL;
+    }
+
+    return S_OK;
+}
+
+static HRESULT fill_output_media_type(struct video_decoder *decoder, IMFMediaType *media_type)
+{
+    IMFMediaType *default_type = decoder->output_type;
+    struct wg_format *wg_format = &decoder->wg_format;
+    UINT32 value, width, height;
+    UINT64 ratio;
+    GUID subtype;
+    HRESULT hr;
+
+    if (FAILED(hr = IMFMediaType_GetGUID(media_type, &MF_MT_SUBTYPE, &subtype)))
+        return hr;
+
+    if (FAILED(hr = IMFMediaType_GetUINT64(media_type, &MF_MT_FRAME_SIZE, &ratio)))
+    {
+        ratio = (UINT64)wg_format->u.video.width << 32 | wg_format->u.video.height;
+        if (FAILED(hr = IMFMediaType_SetUINT64(media_type, &MF_MT_FRAME_SIZE, ratio)))
+            return hr;
+    }
+    width = ratio >> 32;
+    height = ratio;
+
+    if (FAILED(hr = IMFMediaType_GetItem(media_type, &MF_MT_FRAME_RATE, NULL)))
+    {
+        ratio = (UINT64)wg_format->u.video.fps_n << 32 | wg_format->u.video.fps_d;
+        if (FAILED(hr = IMFMediaType_SetUINT64(media_type, &MF_MT_FRAME_RATE, ratio)))
+            return hr;
+    }
+
+    if (FAILED(hr = IMFMediaType_GetItem(media_type, &MF_MT_PIXEL_ASPECT_RATIO, NULL)))
+    {
+        ratio = (UINT64)1 << 32 | 1; /* FIXME: read it from format */
+        if (FAILED(hr = IMFMediaType_SetUINT64(media_type, &MF_MT_PIXEL_ASPECT_RATIO, ratio)))
+            return hr;
+    }
+
+    if (FAILED(hr = IMFMediaType_GetItem(media_type, &MF_MT_SAMPLE_SIZE, NULL)))
+    {
+        if (FAILED(hr = MFCalculateImageSize(&subtype, width, height, &value)))
+            return hr;
+        if (FAILED(hr = IMFMediaType_SetUINT32(media_type, &MF_MT_SAMPLE_SIZE, value)))
+            return hr;
+    }
+
+    if (FAILED(hr = IMFMediaType_GetItem(media_type, &MF_MT_DEFAULT_STRIDE, NULL)))
+    {
+        if (FAILED(hr = MFGetStrideForBitmapInfoHeader(subtype.Data1, width, (LONG *)&value)))
+            return hr;
+        if (FAILED(hr = IMFMediaType_SetUINT32(media_type, &MF_MT_DEFAULT_STRIDE, value)))
+            return hr;
+    }
+
+    if (FAILED(hr = IMFMediaType_GetItem(media_type, &MF_MT_INTERLACE_MODE, NULL)))
+    {
+        if (!default_type || FAILED(hr = IMFMediaType_GetUINT32(default_type, &MF_MT_INTERLACE_MODE, &value)))
+            value = MFVideoInterlace_MixedInterlaceOrProgressive;
+        if (FAILED(hr = IMFMediaType_SetUINT32(media_type, &MF_MT_INTERLACE_MODE, value)))
+            return hr;
+    }
+
+    if (FAILED(hr = IMFMediaType_GetItem(media_type, &MF_MT_ALL_SAMPLES_INDEPENDENT, NULL)))
+    {
+        if (!default_type || FAILED(hr = IMFMediaType_GetUINT32(default_type, &MF_MT_ALL_SAMPLES_INDEPENDENT, &value)))
+            value = 1;
+        if (FAILED(hr = IMFMediaType_SetUINT32(media_type, &MF_MT_ALL_SAMPLES_INDEPENDENT, value)))
+            return hr;
+    }
+
+    if (FAILED(hr = IMFMediaType_GetItem(media_type, &MF_MT_VIDEO_ROTATION, NULL)))
+    {
+        if (!default_type || FAILED(hr = IMFMediaType_GetUINT32(default_type, &MF_MT_VIDEO_ROTATION, &value)))
+            value = 0;
+        if (FAILED(hr = IMFMediaType_SetUINT32(media_type, &MF_MT_VIDEO_ROTATION, value)))
+            return hr;
+    }
+
+    if (FAILED(hr = IMFMediaType_GetItem(media_type, &MF_MT_FIXED_SIZE_SAMPLES, NULL)))
+    {
+        if (!default_type || FAILED(hr = IMFMediaType_GetUINT32(default_type, &MF_MT_FIXED_SIZE_SAMPLES, &value)))
+            value = 1;
+        if (FAILED(hr = IMFMediaType_SetUINT32(media_type, &MF_MT_FIXED_SIZE_SAMPLES, value)))
+            return hr;
+    }
+
+    if (FAILED(hr = IMFMediaType_GetItem(media_type, &MF_MT_MINIMUM_DISPLAY_APERTURE, NULL))
+            && !IsRectEmpty(&wg_format->u.video.padding))
+    {
+        MFVideoArea aperture =
+        {
+            .OffsetX = {.value = wg_format->u.video.padding.left},
+            .OffsetY = {.value = wg_format->u.video.padding.top},
+            .Area.cx = wg_format->u.video.width - wg_format->u.video.padding.right - wg_format->u.video.padding.left,
+            .Area.cy = wg_format->u.video.height - wg_format->u.video.padding.bottom - wg_format->u.video.padding.top,
+        };
+
+        if (FAILED(hr = IMFMediaType_SetBlob(media_type, &MF_MT_MINIMUM_DISPLAY_APERTURE,
+                (BYTE *)&aperture, sizeof(aperture))))
+            return hr;
+    }
+
+    return S_OK;
+}
+
+static HRESULT WINAPI transform_QueryInterface(IMFTransform *iface, REFIID iid, void **out)
+{
+    struct video_decoder *decoder = impl_from_IMFTransform(iface);
+
+    TRACE("iface %p, iid %s, out %p.\n", iface, debugstr_guid(iid), out);
+
+    if (IsEqualGUID(iid, &IID_IUnknown) ||
+            IsEqualGUID(iid, &IID_IMFTransform))
+        *out = &decoder->IMFTransform_iface;
+    else
+    {
+        *out = NULL;
+        WARN("%s not implemented, returning E_NOINTERFACE.\n", debugstr_guid(iid));
+        return E_NOINTERFACE;
+    }
+
+    IUnknown_AddRef((IUnknown *)*out);
+    return S_OK;
+}
+
+static ULONG WINAPI transform_AddRef(IMFTransform *iface)
+{
+    struct video_decoder *decoder = impl_from_IMFTransform(iface);
+    ULONG refcount = InterlockedIncrement(&decoder->refcount);
+
+    TRACE("iface %p increasing refcount to %lu.\n", decoder, refcount);
+
+    return refcount;
+}
+
+static ULONG WINAPI transform_Release(IMFTransform *iface)
+{
+    struct video_decoder *decoder = impl_from_IMFTransform(iface);
+    ULONG refcount = InterlockedDecrement(&decoder->refcount);
+
+    TRACE("iface %p decreasing refcount to %lu.\n", decoder, refcount);
+
+    if (!refcount)
+    {
+        if (decoder->wg_transform)
+            wg_transform_destroy(decoder->wg_transform);
+        if (decoder->input_type)
+            IMFMediaType_Release(decoder->input_type);
+        if (decoder->output_type)
+            IMFMediaType_Release(decoder->output_type);
+        if (decoder->output_attributes)
+            IMFAttributes_Release(decoder->output_attributes);
+        if (decoder->attributes)
+            IMFAttributes_Release(decoder->attributes);
+
+        wg_sample_queue_destroy(decoder->wg_sample_queue);
+        free(decoder);
+    }
+
+    return refcount;
+}
+
+static HRESULT WINAPI transform_GetStreamLimits(IMFTransform *iface, DWORD *input_minimum,
+        DWORD *input_maximum, DWORD *output_minimum, DWORD *output_maximum)
+{
+    TRACE("iface %p, input_minimum %p, input_maximum %p, output_minimum %p, output_maximum %p.\n",
+            iface, input_minimum, input_maximum, output_minimum, output_maximum);
+    *input_minimum = *input_maximum = *output_minimum = *output_maximum = 1;
+    return S_OK;
+}
+
+static HRESULT WINAPI transform_GetStreamCount(IMFTransform *iface, DWORD *inputs, DWORD *outputs)
+{
+    TRACE("iface %p, inputs %p, outputs %p.\n", iface, inputs, outputs);
+    *inputs = *outputs = 1;
+    return S_OK;
+}
+
+static HRESULT WINAPI transform_GetStreamIDs(IMFTransform *iface, DWORD input_size, DWORD *inputs,
+        DWORD output_size, DWORD *outputs)
+{
+    TRACE("iface %p, input_size %lu, inputs %p, output_size %lu, outputs %p.\n", iface,
+            input_size, inputs, output_size, outputs);
+    return E_NOTIMPL;
+}
+
+static HRESULT WINAPI transform_GetInputStreamInfo(IMFTransform *iface, DWORD id, MFT_INPUT_STREAM_INFO *info)
+{
+    struct video_decoder *decoder = impl_from_IMFTransform(iface);
+
+    TRACE("iface %p, id %#lx, info %p.\n", iface, id, info);
+
+    *info = decoder->input_info;
+    return S_OK;
+}
+
+static HRESULT WINAPI transform_GetOutputStreamInfo(IMFTransform *iface, DWORD id, MFT_OUTPUT_STREAM_INFO *info)
+{
+    struct video_decoder *decoder = impl_from_IMFTransform(iface);
+
+    TRACE("iface %p, id %#lx, info %p.\n", iface, id, info);
+
+    *info = decoder->output_info;
+    return S_OK;
+}
+
+static HRESULT WINAPI transform_GetAttributes(IMFTransform *iface, IMFAttributes **attributes)
+{
+    struct video_decoder *decoder = impl_from_IMFTransform(iface);
+
+    TRACE("iface %p, attributes %p semi-stub!\n", iface, attributes);
+
+    if (!attributes)
+        return E_POINTER;
+
+    IMFAttributes_AddRef((*attributes = decoder->attributes));
+    return S_OK;
+}
+
+static HRESULT WINAPI transform_GetInputStreamAttributes(IMFTransform *iface, DWORD id, IMFAttributes **attributes)
+{
+    FIXME("iface %p, id %#lx, attributes %p.\n", iface, id, attributes);
+    return E_NOTIMPL;
+}
+
+static HRESULT WINAPI transform_GetOutputStreamAttributes(IMFTransform *iface, DWORD id, IMFAttributes **attributes)
+{
+    struct video_decoder *decoder = impl_from_IMFTransform(iface);
+
+    TRACE("iface %p, id %#lx, attributes %p semi-stub!\n", iface, id, attributes);
+
+    if (!attributes)
+        return E_POINTER;
+    if (id)
+        return MF_E_INVALIDSTREAMNUMBER;
+
+    IMFAttributes_AddRef((*attributes = decoder->output_attributes));
+    return S_OK;
+}
+
+static HRESULT WINAPI transform_DeleteInputStream(IMFTransform *iface, DWORD id)
+{
+    FIXME("iface %p, id %#lx.\n", iface, id);
+    return E_NOTIMPL;
+}
+
+static HRESULT WINAPI transform_AddInputStreams(IMFTransform *iface, DWORD streams, DWORD *ids)
+{
+    FIXME("iface %p, streams %lu, ids %p.\n", iface, streams, ids);
+    return E_NOTIMPL;
+}
+
+static HRESULT WINAPI transform_GetInputAvailableType(IMFTransform *iface, DWORD id, DWORD index,
+        IMFMediaType **type)
+{
+    IMFMediaType *media_type;
+    const GUID *subtype;
+    HRESULT hr;
+
+    TRACE("iface %p, id %#lx, index %#lx, type %p.\n", iface, id, index, type);
+
+    *type = NULL;
+
+    if (index >= ARRAY_SIZE(input_types))
+        return MF_E_NO_MORE_TYPES;
+    subtype = input_types[index];
+
+    if (FAILED(hr = MFCreateMediaType(&media_type)))
+        return hr;
+
+    if (SUCCEEDED(hr = IMFMediaType_SetGUID(media_type, &MF_MT_MAJOR_TYPE, &MFMediaType_Video)) &&
+            SUCCEEDED(hr = IMFMediaType_SetGUID(media_type, &MF_MT_SUBTYPE, subtype)))
+        IMFMediaType_AddRef((*type = media_type));
+
+    IMFMediaType_Release(media_type);
+    return hr;
+}
+
+static HRESULT WINAPI transform_GetOutputAvailableType(IMFTransform *iface, DWORD id,
+        DWORD index, IMFMediaType **type)
+{
+    struct video_decoder *decoder = impl_from_IMFTransform(iface);
+    IMFMediaType *media_type;
+    const GUID *output_type;
+    HRESULT hr;
+
+    TRACE("iface %p, id %#lx, index %#lx, type %p.\n", iface, id, index, type);
+
+    if (!decoder->input_type)
+        return MF_E_TRANSFORM_TYPE_NOT_SET;
+
+    *type = NULL;
+
+    if (index >= ARRAY_SIZE(output_types))
+        return MF_E_NO_MORE_TYPES;
+    output_type = output_types[index];
+
+    if (FAILED(hr = MFCreateMediaType(&media_type)))
+        return hr;
+
+    if (FAILED(hr = IMFMediaType_SetGUID(media_type, &MF_MT_MAJOR_TYPE, &MFMediaType_Video)))
+        goto done;
+    if (FAILED(hr = IMFMediaType_SetGUID(media_type, &MF_MT_SUBTYPE, output_type)))
+        goto done;
+
+    hr = fill_output_media_type(decoder, media_type);
+
+done:
+    if (SUCCEEDED(hr))
+        IMFMediaType_AddRef((*type = media_type));
+
+    IMFMediaType_Release(media_type);
+    return hr;
+}
+
+static HRESULT WINAPI transform_SetInputType(IMFTransform *iface, DWORD id, IMFMediaType *type, DWORD flags)
+{
+    struct video_decoder *decoder = impl_from_IMFTransform(iface);
+    GUID major, subtype;
+    UINT64 frame_size;
+    HRESULT hr;
+    ULONG i;
+
+    TRACE("iface %p, id %#lx, type %p, flags %#lx.\n", iface, id, type, flags);
+
+    if (FAILED(hr = IMFMediaType_GetGUID(type, &MF_MT_MAJOR_TYPE, &major)) ||
+            FAILED(hr = IMFMediaType_GetGUID(type, &MF_MT_SUBTYPE, &subtype)))
+        return E_INVALIDARG;
+
+    if (!IsEqualGUID(&major, &MFMediaType_Video))
+        return MF_E_INVALIDMEDIATYPE;
+
+    for (i = 0; i < ARRAY_SIZE(input_types); ++i)
+        if (IsEqualGUID(&subtype, input_types[i]))
+            break;
+    if (i == ARRAY_SIZE(input_types))
+        return MF_E_INVALIDMEDIATYPE;
+    if (flags & MFT_SET_TYPE_TEST_ONLY)
+        return S_OK;
+
+    if (decoder->output_type)
+    {
+        IMFMediaType_Release(decoder->output_type);
+        decoder->output_type = NULL;
+    }
+
+    if (decoder->input_type)
+        IMFMediaType_Release(decoder->input_type);
+    IMFMediaType_AddRef((decoder->input_type = type));
+
+    if (SUCCEEDED(IMFMediaType_GetUINT64(type, &MF_MT_FRAME_SIZE, &frame_size)))
+    {
+        decoder->wg_format.u.video.width = frame_size >> 32;
+        decoder->wg_format.u.video.height = (UINT32)frame_size;
+        decoder->output_info.cbSize = decoder->wg_format.u.video.width
+                * decoder->wg_format.u.video.height * 4;
+    }
+
+    return S_OK;
+}
+
+static HRESULT WINAPI transform_SetOutputType(IMFTransform *iface, DWORD id, IMFMediaType *type, DWORD flags)
+{
+    struct video_decoder *decoder = impl_from_IMFTransform(iface);
+    GUID major, subtype;
+    UINT64 frame_size;
+    HRESULT hr;
+    ULONG i;
+
+    TRACE("iface %p, id %#lx, type %p, flags %#lx.\n", iface, id, type, flags);
+
+    if (!decoder->input_type)
+        return MF_E_TRANSFORM_TYPE_NOT_SET;
+
+    if (FAILED(hr = IMFMediaType_GetGUID(type, &MF_MT_MAJOR_TYPE, &major)) ||
+            FAILED(hr = IMFMediaType_GetGUID(type, &MF_MT_SUBTYPE, &subtype)))
+        return hr;
+
+    if (!IsEqualGUID(&major, &MFMediaType_Video))
+        return MF_E_INVALIDMEDIATYPE;
+
+    for (i = 0; i < ARRAY_SIZE(output_types); ++i)
+        if (IsEqualGUID(&subtype, output_types[i]))
+            break;
+    if (i == ARRAY_SIZE(output_types))
+        return MF_E_INVALIDMEDIATYPE;
+
+    if (FAILED(hr = IMFMediaType_GetUINT64(type, &MF_MT_FRAME_SIZE, &frame_size))
+            || (frame_size >> 32) != decoder->wg_format.u.video.width
+            || (UINT32)frame_size != decoder->wg_format.u.video.height)
+        return MF_E_INVALIDMEDIATYPE;
+    if (flags & MFT_SET_TYPE_TEST_ONLY)
+        return S_OK;
+
+    if (decoder->output_type)
+        IMFMediaType_Release(decoder->output_type);
+    IMFMediaType_AddRef((decoder->output_type = type));
+
+    if (decoder->wg_transform)
+    {
+        struct wg_format output_format;
+        mf_media_type_to_wg_format(decoder->output_type, &output_format);
+
+        /* Don't force any specific size, the video stream already has the metadata for it
+         * and will generate a MF_E_TRANSFORM_STREAM_CHANGE result later.
+         */
+        output_format.u.video.width = 0;
+        output_format.u.video.height = 0;
+        output_format.u.video.fps_d = 0;
+        output_format.u.video.fps_n = 0;
+
+        if (output_format.major_type == WG_MAJOR_TYPE_UNKNOWN
+                || !wg_transform_set_output_format(decoder->wg_transform, &output_format))
+        {
+            IMFMediaType_Release(decoder->output_type);
+            decoder->output_type = NULL;
+            return MF_E_INVALIDMEDIATYPE;
+        }
+    }
+    else if (FAILED(hr = try_create_wg_transform(decoder)))
+    {
+        IMFMediaType_Release(decoder->output_type);
+        decoder->output_type = NULL;
+    }
+
+    return hr;
+}
+
+static HRESULT WINAPI transform_GetInputCurrentType(IMFTransform *iface, DWORD id, IMFMediaType **type)
+{
+    FIXME("iface %p, id %#lx, type %p stub!\n", iface, id, type);
+    return E_NOTIMPL;
+}
+
+static HRESULT WINAPI transform_GetOutputCurrentType(IMFTransform *iface, DWORD id, IMFMediaType **type)
+{
+    struct video_decoder *decoder = impl_from_IMFTransform(iface);
+    HRESULT hr;
+
+    FIXME("iface %p, id %#lx, type %p stub!\n", iface, id, type);
+
+    if (!decoder->output_type)
+        return MF_E_TRANSFORM_TYPE_NOT_SET;
+
+    if (FAILED(hr = MFCreateMediaType(type)))
+        return hr;
+
+    return IMFMediaType_CopyAllItems(decoder->output_type, (IMFAttributes *)*type);
+}
+
+static HRESULT WINAPI transform_GetInputStatus(IMFTransform *iface, DWORD id, DWORD *flags)
+{
+    FIXME("iface %p, id %#lx, flags %p stub!\n", iface, id, flags);
+    return E_NOTIMPL;
+}
+
+static HRESULT WINAPI transform_GetOutputStatus(IMFTransform *iface, DWORD *flags)
+{
+    FIXME("iface %p, flags %p stub!\n", iface, flags);
+    return E_NOTIMPL;
+}
+
+static HRESULT WINAPI transform_SetOutputBounds(IMFTransform *iface, LONGLONG lower, LONGLONG upper)
+{
+    TRACE("iface %p, lower %I64d, upper %I64d.\n", iface, lower, upper);
+    return E_NOTIMPL;
+}
+
+static HRESULT WINAPI transform_ProcessEvent(IMFTransform *iface, DWORD id, IMFMediaEvent *event)
+{
+    FIXME("iface %p, id %#lx, event %p stub!\n", iface, id, event);
+    return E_NOTIMPL;
+}
+
+static HRESULT WINAPI transform_ProcessMessage(IMFTransform *iface, MFT_MESSAGE_TYPE message, ULONG_PTR param)
+{
+    FIXME("iface %p, message %#x, param %Ix stub!\n", iface, message, param);
+    return S_OK;
+}
+
+static HRESULT WINAPI transform_ProcessInput(IMFTransform *iface, DWORD id, IMFSample *sample, DWORD flags)
+{
+    struct video_decoder *decoder = impl_from_IMFTransform(iface);
+    HRESULT hr;
+
+    TRACE("iface %p, id %#lx, sample %p, flags %#lx.\n", iface, id, sample, flags);
+
+    if (!decoder->wg_transform)
+        return MF_E_TRANSFORM_TYPE_NOT_SET;
+
+    hr = wg_transform_push_mf(decoder->wg_transform, sample, decoder->wg_sample_queue);
+
+    TRACE("returning %#lx.\n", hr);
+    return hr;
+}
+
+static HRESULT WINAPI transform_ProcessOutput(IMFTransform *iface, DWORD flags, DWORD count,
+        MFT_OUTPUT_DATA_BUFFER *samples, DWORD *status)
+{
+    struct video_decoder *decoder = impl_from_IMFTransform(iface);
+    struct wg_format wg_format;
+    UINT32 sample_size;
+    UINT64 frame_rate;
+    GUID subtype;
+    HRESULT hr;
+
+    TRACE("iface %p, flags %#lx, count %lu, samples %p, status %p.\n", iface, flags, count, samples, status);
+
+    if (count != 1)
+        return E_INVALIDARG;
+
+    if (!decoder->wg_transform)
+        return MF_E_TRANSFORM_TYPE_NOT_SET;
+
+    *status = samples->dwStatus = 0;
+    if (!samples->pSample)
+        return E_INVALIDARG;
+
+    if (FAILED(hr = IMFMediaType_GetGUID(decoder->output_type, &MF_MT_SUBTYPE, &subtype)))
+        return hr;
+    if (FAILED(hr = MFCalculateImageSize(&subtype, decoder->wg_format.u.video.width,
+            decoder->wg_format.u.video.height, &sample_size)))
+        return hr;
+
+    if (SUCCEEDED(hr = wg_transform_read_mf(decoder->wg_transform, samples->pSample,
+            sample_size, &wg_format, &samples->dwStatus)))
+        wg_sample_queue_flush(decoder->wg_sample_queue, false);
+
+    if (hr == MF_E_TRANSFORM_STREAM_CHANGE)
+    {
+        decoder->wg_format = wg_format;
+
+        if (FAILED(hr = MFCalculateImageSize(&subtype, decoder->wg_format.u.video.width,
+                decoder->wg_format.u.video.height, &sample_size)))
+            return hr;
+        decoder->output_info.cbSize = sample_size;
+
+        /* keep the frame rate that was requested, GStreamer doesn't provide any */
+        if (SUCCEEDED(IMFMediaType_GetUINT64(decoder->output_type, &MF_MT_FRAME_RATE, &frame_rate)))
+        {
+            decoder->wg_format.u.video.fps_n = frame_rate >> 32;
+            decoder->wg_format.u.video.fps_d = (UINT32)frame_rate;
+        }
+
+        samples[0].dwStatus |= MFT_OUTPUT_DATA_BUFFER_FORMAT_CHANGE;
+        *status |= MFT_OUTPUT_DATA_BUFFER_FORMAT_CHANGE;
+    }
+
+    TRACE("returning %#lx.\n", hr);
+    return hr;
+}
+
+static const IMFTransformVtbl transform_vtbl =
+{
+    transform_QueryInterface,
+    transform_AddRef,
+    transform_Release,
+    transform_GetStreamLimits,
+    transform_GetStreamCount,
+    transform_GetStreamIDs,
+    transform_GetInputStreamInfo,
+    transform_GetOutputStreamInfo,
+    transform_GetAttributes,
+    transform_GetInputStreamAttributes,
+    transform_GetOutputStreamAttributes,
+    transform_DeleteInputStream,
+    transform_AddInputStreams,
+    transform_GetInputAvailableType,
+    transform_GetOutputAvailableType,
+    transform_SetInputType,
+    transform_SetOutputType,
+    transform_GetInputCurrentType,
+    transform_GetOutputCurrentType,
+    transform_GetInputStatus,
+    transform_GetOutputStatus,
+    transform_SetOutputBounds,
+    transform_ProcessEvent,
+    transform_ProcessMessage,
+    transform_ProcessInput,
+    transform_ProcessOutput,
+};
+
+HRESULT WINAPI winegstreamer_create_video_decoder(IMFTransform **out)
+{
+    struct video_decoder *decoder;
+    HRESULT hr;
+
+    TRACE("out %p.\n", out);
+
+    if (!(decoder = calloc(1, sizeof(*decoder))))
+        return E_OUTOFMEMORY;
+
+    decoder->IMFTransform_iface.lpVtbl = &transform_vtbl;
+    decoder->refcount = 1;
+    decoder->wg_format.u.video.format = WG_VIDEO_FORMAT_UNKNOWN;
+    decoder->wg_format.u.video.width = 1920;
+    decoder->wg_format.u.video.height = 1080;
+    decoder->wg_format.u.video.fps_n = 30000;
+    decoder->wg_format.u.video.fps_d = 1001;
+
+    decoder->input_info.dwFlags = MFT_INPUT_STREAM_WHOLE_SAMPLES | MFT_INPUT_STREAM_SINGLE_SAMPLE_PER_BUFFER
+            | MFT_INPUT_STREAM_FIXED_SAMPLE_SIZE;
+    decoder->input_info.cbSize = 0x1000;
+    decoder->output_info.dwFlags = MFT_OUTPUT_STREAM_WHOLE_SAMPLES | MFT_OUTPUT_STREAM_SINGLE_SAMPLE_PER_BUFFER
+            | MFT_OUTPUT_STREAM_FIXED_SAMPLE_SIZE;
+    decoder->output_info.cbSize = 1920 * 1080 * 4;
+
+    if (FAILED(hr = MFCreateAttributes(&decoder->attributes, 16)))
+        goto failed;
+    if (FAILED(hr = IMFAttributes_SetUINT32(decoder->attributes, &MF_LOW_LATENCY, 0)))
+        goto failed;
+    if (FAILED(hr = MFCreateAttributes(&decoder->output_attributes, 0)))
+        goto failed;
+    if (FAILED(hr = wg_sample_queue_create(&decoder->wg_sample_queue)))
+        goto failed;
+
+    *out = &decoder->IMFTransform_iface;
+    TRACE("created decoder %p.\n", *out);
+    return S_OK;
+
+failed:
+    if (decoder->output_attributes)
+        IMFAttributes_Release(decoder->output_attributes);
+    if (decoder->attributes)
+        IMFAttributes_Release(decoder->attributes);
+    free(decoder);
+    return hr;
+}
diff --git a/dlls/winegstreamer/winegstreamer.spec b/dlls/winegstreamer/winegstreamer.spec
index 9804e324044..809c7208981 100644
--- a/dlls/winegstreamer/winegstreamer.spec
+++ b/dlls/winegstreamer/winegstreamer.spec
@@ -2,4 +2,5 @@
 @ stdcall -private DllGetClassObject(ptr ptr ptr)
 @ stdcall -private DllRegisterServer()
 @ stdcall -private DllUnregisterServer()
+@ stdcall winegstreamer_create_video_decoder(ptr)
 @ stdcall winegstreamer_create_wm_sync_reader(ptr ptr)
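
Reviewer note, not part of the patch: the sketch below shows how a hypothetical caller
might exercise the new winegstreamer_create_video_decoder() export, based only on the
behaviour visible in this file (the input type must be set before the output side is
usable, and the advertised output frame size is taken from the input's MF_MT_FRAME_SIZE).
The helper name create_h264_decoder(), the 1920x1080 frame size, and the COBJMACROS-style
interface macros are assumptions for illustration; error handling is abbreviated and the
prototype for the export is assumed to be visible to the caller.

    static HRESULT create_h264_decoder(IMFTransform **decoder)
    {
        IMFMediaType *input, *output;
        HRESULT hr;

        if (FAILED(hr = winegstreamer_create_video_decoder(decoder)))
            return hr;

        /* Set the input type first; until then the output side of the
         * transform reports MF_E_TRANSFORM_TYPE_NOT_SET. */
        MFCreateMediaType(&input);
        IMFMediaType_SetGUID(input, &MF_MT_MAJOR_TYPE, &MFMediaType_Video);
        IMFMediaType_SetGUID(input, &MF_MT_SUBTYPE, &MFVideoFormat_H264);
        IMFMediaType_SetUINT64(input, &MF_MT_FRAME_SIZE, (UINT64)1920 << 32 | 1080);
        hr = IMFTransform_SetInputType(*decoder, 0, input, 0);
        IMFMediaType_Release(input);
        if (FAILED(hr))
            return hr;

        /* Take the first advertised output type (YV12 here); its frame size
         * comes from the input type, which SetOutputType checks against. */
        if (SUCCEEDED(hr = IMFTransform_GetOutputAvailableType(*decoder, 0, 0, &output)))
        {
            hr = IMFTransform_SetOutputType(*decoder, 0, output, 0);
            IMFMediaType_Release(output);
        }

        return hr;
    }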