From: Shaun Ren <sren@codeweavers.com>
---
 dlls/winegstreamer/video_decoder.c | 425 +++++++++++++++++++++++++++--
 1 file changed, 400 insertions(+), 25 deletions(-)
diff --git a/dlls/winegstreamer/video_decoder.c b/dlls/winegstreamer/video_decoder.c
index 5706bd13325..90f0b43adc9 100644
--- a/dlls/winegstreamer/video_decoder.c
+++ b/dlls/winegstreamer/video_decoder.c
@@ -29,43 +29,190 @@
 WINE_DEFAULT_DEBUG_CHANNEL(mfplat);
 
+DEFINE_MEDIATYPE_GUID(MFVideoFormat_IV50, MAKEFOURCC('I','V','5','0'));
+
+static const GUID *const input_types[] =
+{
+    &MFVideoFormat_H264,
+    &MFVideoFormat_IV50,
+};
+static const GUID *const output_types[] =
+{
+    &MFVideoFormat_YV12,
+    &MFVideoFormat_YUY2,
+    &MFVideoFormat_NV11,
+    &MFVideoFormat_NV12,
+    &MFVideoFormat_RGB32,
+    &MFVideoFormat_RGB24,
+    &MFVideoFormat_RGB565,
+    &MFVideoFormat_RGB555,
+    &MFVideoFormat_RGB8,
+};
+
+struct video_decoder
+{
+    IMFTransform IMFTransform_iface;
+    LONG refcount;
+
+    IMFMediaType *input_type;
+    IMFMediaType *output_type;
+
+    struct wg_format wg_format;
+    struct wg_transform *wg_transform;
+    struct wg_sample_queue *wg_sample_queue;
+};
+
+static struct video_decoder *impl_from_IMFTransform(IMFTransform *iface)
+{
+    return CONTAINING_RECORD(iface, struct video_decoder, IMFTransform_iface);
+}
+
+static HRESULT try_create_wg_transform(struct video_decoder *decoder)
+{
+    struct wg_format input_format;
+    struct wg_format output_format;
+
+    if (decoder->wg_transform)
+        wg_transform_destroy(decoder->wg_transform);
+    decoder->wg_transform = NULL;
+
+    mf_media_type_to_wg_format(decoder->input_type, &input_format);
+    if (input_format.major_type == WG_MAJOR_TYPE_UNKNOWN)
+        return MF_E_INVALIDMEDIATYPE;
+
+    mf_media_type_to_wg_format(decoder->output_type, &output_format);
+    if (output_format.major_type == WG_MAJOR_TYPE_UNKNOWN)
+        return MF_E_INVALIDMEDIATYPE;
+
+    /* Don't force any specific size, the video stream already has the metadata for it
+     * and will generate a MF_E_TRANSFORM_STREAM_CHANGE result later.
+     */
+    output_format.u.video.width = 0;
+    output_format.u.video.height = 0;
+    output_format.u.video.fps_d = 0;
+    output_format.u.video.fps_n = 0;
+
+    if (!(decoder->wg_transform = wg_transform_create(&input_format, &output_format)))
+    {
+        ERR("Failed to create transform with input major_type %u.\n", input_format.major_type);
+        return E_FAIL;
+    }
+
+    return S_OK;
+}
+
+static HRESULT fill_output_media_type(struct video_decoder *decoder, IMFMediaType *media_type)
+{
+    struct wg_format *wg_format = &decoder->wg_format;
+    UINT32 value, width, height;
+    UINT64 ratio;
+    GUID subtype;
+    HRESULT hr;
+
+    if (FAILED(hr = IMFMediaType_GetGUID(media_type, &MF_MT_SUBTYPE, &subtype)))
+        return hr;
+
+    if (FAILED(hr = IMFMediaType_GetUINT64(media_type, &MF_MT_FRAME_SIZE, &ratio)))
+    {
+        ratio = (UINT64)wg_format->u.video.width << 32 | wg_format->u.video.height;
+        if (FAILED(hr = IMFMediaType_SetUINT64(media_type, &MF_MT_FRAME_SIZE, ratio)))
+            return hr;
+    }
+    width = ratio >> 32;
+    height = ratio;
+
+    if (FAILED(hr = IMFMediaType_GetItem(media_type, &MF_MT_FRAME_RATE, NULL)))
+    {
+        ratio = (UINT64)wg_format->u.video.fps_n << 32 | wg_format->u.video.fps_d;
+        if (FAILED(hr = IMFMediaType_SetUINT64(media_type, &MF_MT_FRAME_RATE, ratio)))
+            return hr;
+    }
+
+    if (FAILED(hr = IMFMediaType_GetItem(media_type, &MF_MT_SAMPLE_SIZE, NULL)))
+    {
+        if (FAILED(hr = MFCalculateImageSize(&subtype, width, height, &value)))
+            return hr;
+        if (FAILED(hr = IMFMediaType_SetUINT32(media_type, &MF_MT_SAMPLE_SIZE, value)))
+            return hr;
+    }
+
+    return S_OK;
+}
+
 static HRESULT WINAPI transform_QueryInterface(IMFTransform *iface, REFIID iid, void **out)
 {
-    FIXME("iface %p, iid %s, out %p.\n", iface, debugstr_guid(iid), out);
-    return E_NOTIMPL;
+    struct video_decoder *decoder = impl_from_IMFTransform(iface);
+
+    TRACE("iface %p, iid %s, out %p.\n", iface, debugstr_guid(iid), out);
+
+    if (IsEqualGUID(iid, &IID_IUnknown) ||
+            IsEqualGUID(iid, &IID_IMFTransform))
+        *out = &decoder->IMFTransform_iface;
+    else
+    {
+        *out = NULL;
+        WARN("%s not implemented, returning E_NOINTERFACE.\n", debugstr_guid(iid));
+        return E_NOINTERFACE;
+    }
+
+    IUnknown_AddRef((IUnknown *)*out);
+    return S_OK;
 }
 
 static ULONG WINAPI transform_AddRef(IMFTransform *iface)
 {
-    FIXME("iface %p.\n", iface);
-    return E_NOTIMPL;
+    struct video_decoder *decoder = impl_from_IMFTransform(iface);
+    ULONG refcount = InterlockedIncrement(&decoder->refcount);
+
+    TRACE("iface %p increasing refcount to %lu.\n", decoder, refcount);
+
+    return refcount;
 }
 
 static ULONG WINAPI transform_Release(IMFTransform *iface)
 {
-    FIXME("iface %p.\n", iface);
-    return E_NOTIMPL;
+    struct video_decoder *decoder = impl_from_IMFTransform(iface);
+    ULONG refcount = InterlockedDecrement(&decoder->refcount);
+
+    TRACE("iface %p decreasing refcount to %lu.\n", decoder, refcount);
+
+    if (!refcount)
+    {
+        if (decoder->wg_transform)
+            wg_transform_destroy(decoder->wg_transform);
+        if (decoder->input_type)
+            IMFMediaType_Release(decoder->input_type);
+        if (decoder->output_type)
+            IMFMediaType_Release(decoder->output_type);
+
+        wg_sample_queue_destroy(decoder->wg_sample_queue);
+        free(decoder);
+    }
+
+    return refcount;
 }
 
 static HRESULT WINAPI transform_GetStreamLimits(IMFTransform *iface, DWORD *input_minimum,
         DWORD *input_maximum, DWORD *output_minimum, DWORD *output_maximum)
 {
-    FIXME("iface %p, input_minimum %p, input_maximum %p, output_minimum %p, output_maximum %p.\n",
+    TRACE("iface %p, input_minimum %p, input_maximum %p, output_minimum %p, output_maximum %p.\n",
             iface, input_minimum, input_maximum, output_minimum, output_maximum);
-    return E_NOTIMPL;
+    *input_minimum = *input_maximum = *output_minimum = *output_maximum = 1;
+    return S_OK;
 }
 
 static HRESULT WINAPI transform_GetStreamCount(IMFTransform *iface, DWORD *inputs, DWORD *outputs)
 {
     TRACE("iface %p, inputs %p, outputs %p.\n", iface, inputs, outputs);
-    return E_NOTIMPL;
+    *inputs = *outputs = 1;
+    return S_OK;
 }
 
 static HRESULT WINAPI transform_GetStreamIDs(IMFTransform *iface, DWORD input_size, DWORD *inputs,
         DWORD output_size, DWORD *outputs)
 {
-    FIXME("iface %p, input_size %lu, inputs %p, output_size %lu, outputs %p.\n", iface,
-            input_size, inputs, output_size, outputs);
+    TRACE("iface %p, input_size %lu, inputs %p, output_size %lu, outputs %p.\n", iface,
+            input_size, inputs, output_size, outputs);
     return E_NOTIMPL;
 }
@@ -114,27 +261,175 @@ static HRESULT WINAPI transform_AddInputStreams(IMFTransform *iface, DWORD strea
 static HRESULT WINAPI transform_GetInputAvailableType(IMFTransform *iface, DWORD id, DWORD index,
         IMFMediaType **type)
 {
-    FIXME("iface %p, id %#lx, index %#lx, type %p.\n", iface, id, index, type);
-    return E_NOTIMPL;
+    IMFMediaType *media_type;
+    const GUID *subtype;
+    HRESULT hr;
+
+    TRACE("iface %p, id %#lx, index %#lx, type %p.\n", iface, id, index, type);
+
+    *type = NULL;
+
+    if (index >= ARRAY_SIZE(input_types))
+        return MF_E_NO_MORE_TYPES;
+    subtype = input_types[index];
+
+    if (FAILED(hr = MFCreateMediaType(&media_type)))
+        return hr;
+
+    if (SUCCEEDED(hr = IMFMediaType_SetGUID(media_type, &MF_MT_MAJOR_TYPE, &MFMediaType_Video)) &&
+            SUCCEEDED(hr = IMFMediaType_SetGUID(media_type, &MF_MT_SUBTYPE, subtype)))
+        IMFMediaType_AddRef((*type = media_type));
+
+    IMFMediaType_Release(media_type);
+    return hr;
 }
 
 static HRESULT WINAPI transform_GetOutputAvailableType(IMFTransform *iface, DWORD id,
         DWORD index, IMFMediaType **type)
 {
-    FIXME("iface %p, id %#lx, index %#lx, type %p.\n", iface, id, index, type);
-    return E_NOTIMPL;
+    struct video_decoder *decoder = impl_from_IMFTransform(iface);
+    IMFMediaType *media_type;
+    const GUID *output_type;
+    HRESULT hr;
+
+    TRACE("iface %p, id %#lx, index %#lx, type %p.\n", iface, id, index, type);
+
+    if (!decoder->input_type)
+        return MF_E_TRANSFORM_TYPE_NOT_SET;
+
+    *type = NULL;
+
+    if (index >= ARRAY_SIZE(output_types))
+        return MF_E_NO_MORE_TYPES;
+    output_type = output_types[index];
+
+    if (FAILED(hr = MFCreateMediaType(&media_type)))
+        return hr;
+
+    if (FAILED(hr = IMFMediaType_SetGUID(media_type, &MF_MT_MAJOR_TYPE, &MFMediaType_Video)))
+        goto done;
+    if (FAILED(hr = IMFMediaType_SetGUID(media_type, &MF_MT_SUBTYPE, output_type)))
+        goto done;
+
+    hr = fill_output_media_type(decoder, media_type);
+
+done:
+    if (SUCCEEDED(hr))
+        IMFMediaType_AddRef((*type = media_type));
+
+    IMFMediaType_Release(media_type);
+    return hr;
 }
 
 static HRESULT WINAPI transform_SetInputType(IMFTransform *iface, DWORD id, IMFMediaType *type, DWORD flags)
 {
-    FIXME("iface %p, id %#lx, type %p, flags %#lx.\n", iface, id, type, flags);
-    return E_NOTIMPL;
+    struct video_decoder *decoder = impl_from_IMFTransform(iface);
+    GUID major, subtype;
+    UINT64 frame_size;
+    HRESULT hr;
+    ULONG i;
+
+    TRACE("iface %p, id %#lx, type %p, flags %#lx.\n", iface, id, type, flags);
+
+    if (FAILED(hr = IMFMediaType_GetGUID(type, &MF_MT_MAJOR_TYPE, &major)) ||
+            FAILED(hr = IMFMediaType_GetGUID(type, &MF_MT_SUBTYPE, &subtype)))
+        return E_INVALIDARG;
+
+    if (!IsEqualGUID(&major, &MFMediaType_Video))
+        return MF_E_INVALIDMEDIATYPE;
+
+    for (i = 0; i < ARRAY_SIZE(input_types); ++i)
+        if (IsEqualGUID(&subtype, input_types[i]))
+            break;
+    if (i == ARRAY_SIZE(input_types))
+        return MF_E_INVALIDMEDIATYPE;
+    if (flags & MFT_SET_TYPE_TEST_ONLY)
+        return S_OK;
+
+    if (decoder->output_type)
+    {
+        IMFMediaType_Release(decoder->output_type);
+        decoder->output_type = NULL;
+    }
+
+    if (decoder->input_type)
+        IMFMediaType_Release(decoder->input_type);
+    IMFMediaType_AddRef((decoder->input_type = type));
+
+    if (SUCCEEDED(IMFMediaType_GetUINT64(type, &MF_MT_FRAME_SIZE, &frame_size)))
+    {
+        decoder->wg_format.u.video.width = frame_size >> 32;
+        decoder->wg_format.u.video.height = (UINT32)frame_size;
+    }
+
+    return S_OK;
 }
 
 static HRESULT WINAPI transform_SetOutputType(IMFTransform *iface, DWORD id, IMFMediaType *type, DWORD flags)
 {
-    FIXME("iface %p, id %#lx, type %p, flags %#lx.\n", iface, id, type, flags);
-    return E_NOTIMPL;
+    struct video_decoder *decoder = impl_from_IMFTransform(iface);
+    GUID major, subtype;
+    UINT64 frame_size;
+    HRESULT hr;
+    ULONG i;
+
+    TRACE("iface %p, id %#lx, type %p, flags %#lx.\n", iface, id, type, flags);
+
+    if (!decoder->input_type)
+        return MF_E_TRANSFORM_TYPE_NOT_SET;
+
+    if (FAILED(hr = IMFMediaType_GetGUID(type, &MF_MT_MAJOR_TYPE, &major)) ||
+            FAILED(hr = IMFMediaType_GetGUID(type, &MF_MT_SUBTYPE, &subtype)))
+        return hr;
+
+    if (!IsEqualGUID(&major, &MFMediaType_Video))
+        return MF_E_INVALIDMEDIATYPE;
+
+    for (i = 0; i < ARRAY_SIZE(output_types); ++i)
+        if (IsEqualGUID(&subtype, output_types[i]))
+            break;
+    if (i == ARRAY_SIZE(output_types))
+        return MF_E_INVALIDMEDIATYPE;
+
+    if (FAILED(hr = IMFMediaType_GetUINT64(type, &MF_MT_FRAME_SIZE, &frame_size))
+            || (frame_size >> 32) != decoder->wg_format.u.video.width
+            || (UINT32)frame_size != decoder->wg_format.u.video.height)
+        return MF_E_INVALIDMEDIATYPE;
+    if (flags & MFT_SET_TYPE_TEST_ONLY)
+        return S_OK;
+
+    if (decoder->output_type)
+        IMFMediaType_Release(decoder->output_type);
+    IMFMediaType_AddRef((decoder->output_type = type));
+
+    if (decoder->wg_transform)
+    {
+        struct wg_format output_format;
+        mf_media_type_to_wg_format(decoder->output_type, &output_format);
+
+        /* Don't force any specific size, the video stream already has the metadata for it
+         * and will generate a MF_E_TRANSFORM_STREAM_CHANGE result later.
+         */
+        output_format.u.video.width = 0;
+        output_format.u.video.height = 0;
+        output_format.u.video.fps_d = 0;
+        output_format.u.video.fps_n = 0;
+
+        if (output_format.major_type == WG_MAJOR_TYPE_UNKNOWN
+                || !wg_transform_set_output_format(decoder->wg_transform, &output_format))
+        {
+            IMFMediaType_Release(decoder->output_type);
+            decoder->output_type = NULL;
+            return MF_E_INVALIDMEDIATYPE;
+        }
+    }
+    else if (FAILED(hr = try_create_wg_transform(decoder)))
+    {
+        IMFMediaType_Release(decoder->output_type);
+        decoder->output_type = NULL;
+    }
+
+    return hr;
 }
 
 static HRESULT WINAPI transform_GetInputCurrentType(IMFTransform *iface, DWORD id, IMFMediaType **type)
@@ -181,15 +476,71 @@ static HRESULT WINAPI transform_ProcessMessage(IMFTransform *iface, MFT_MESSAGE_
 static HRESULT WINAPI transform_ProcessInput(IMFTransform *iface, DWORD id, IMFSample *sample, DWORD flags)
 {
-    FIXME("iface %p, id %#lx, sample %p, flags %#lx.\n", iface, id, sample, flags);
-    return E_NOTIMPL;
+    struct video_decoder *decoder = impl_from_IMFTransform(iface);
+    HRESULT hr;
+
+    TRACE("iface %p, id %#lx, sample %p, flags %#lx.\n", iface, id, sample, flags);
+
+    if (!decoder->wg_transform)
+        return MF_E_TRANSFORM_TYPE_NOT_SET;
+
+    hr = wg_transform_push_mf(decoder->wg_transform, sample, decoder->wg_sample_queue);
+
+    return hr;
 }
 
 static HRESULT WINAPI transform_ProcessOutput(IMFTransform *iface, DWORD flags, DWORD count,
         MFT_OUTPUT_DATA_BUFFER *samples, DWORD *status)
 {
-    FIXME("iface %p, flags %#lx, count %lu, samples %p, status %p.\n", iface, flags, count, samples, status);
-    return E_NOTIMPL;
+    struct video_decoder *decoder = impl_from_IMFTransform(iface);
+    struct wg_format wg_format;
+    UINT32 sample_size;
+    UINT64 frame_rate;
+    GUID subtype;
+    HRESULT hr;
+
+    TRACE("iface %p, flags %#lx, count %lu, samples %p, status %p.\n", iface, flags, count, samples, status);
+
+    if (count != 1)
+        return E_INVALIDARG;
+
+    if (!decoder->wg_transform)
+        return MF_E_TRANSFORM_TYPE_NOT_SET;
+
+    *status = samples->dwStatus = 0;
+    if (!samples->pSample)
+        return E_INVALIDARG;
+
+    if (FAILED(hr = IMFMediaType_GetGUID(decoder->output_type, &MF_MT_SUBTYPE, &subtype)))
+        return hr;
+    if (FAILED(hr = MFCalculateImageSize(&subtype, decoder->wg_format.u.video.width,
+            decoder->wg_format.u.video.height, &sample_size)))
+        return hr;
+
+    if (SUCCEEDED(hr = wg_transform_read_mf(decoder->wg_transform, samples->pSample,
+            sample_size, &wg_format, &samples->dwStatus)))
+        wg_sample_queue_flush(decoder->wg_sample_queue, false);
+
+    if (hr == MF_E_TRANSFORM_STREAM_CHANGE)
+    {
+        decoder->wg_format = wg_format;
+
+        if (FAILED(hr = MFCalculateImageSize(&subtype, decoder->wg_format.u.video.width,
+                decoder->wg_format.u.video.height, &sample_size)))
+            return hr;
+
+        /* keep the frame rate that was requested, GStreamer doesn't provide any */
+        if (SUCCEEDED(IMFMediaType_GetUINT64(decoder->output_type, &MF_MT_FRAME_RATE, &frame_rate)))
+        {
+            decoder->wg_format.u.video.fps_n = frame_rate >> 32;
+            decoder->wg_format.u.video.fps_d = (UINT32)frame_rate;
+        }
+
+        samples[0].dwStatus |= MFT_OUTPUT_DATA_BUFFER_FORMAT_CHANGE;
+        *status |= MFT_OUTPUT_DATA_BUFFER_FORMAT_CHANGE;
+    }
+
+    return hr;
 }
 
 static const IMFTransformVtbl transform_vtbl =
@@ -224,6 +575,30 @@ static const IMFTransformVtbl transform_vtbl =
 HRESULT WINAPI winegstreamer_create_video_decoder(IMFTransform **out)
 {
-    FIXME("out %p.\n", out);
-    return E_NOTIMPL;
+    struct video_decoder *decoder;
+    HRESULT hr;
+
+    TRACE("out %p.\n", out);
+
+    if (!(decoder = calloc(1, sizeof(*decoder))))
+        return E_OUTOFMEMORY;
+
+    decoder->IMFTransform_iface.lpVtbl = &transform_vtbl;
+    decoder->refcount = 1;
+    decoder->wg_format.u.video.format = WG_VIDEO_FORMAT_UNKNOWN;
+    decoder->wg_format.u.video.width = 1920;
+    decoder->wg_format.u.video.height = 1080;
+    decoder->wg_format.u.video.fps_n = 30000;
+    decoder->wg_format.u.video.fps_d = 1001;
+
+    if (FAILED(hr = wg_sample_queue_create(&decoder->wg_sample_queue)))
+        goto failed;
+
+    *out = &decoder->IMFTransform_iface;
+    TRACE("created decoder %p.\n", *out);
+    return S_OK;
+
+failed:
+    free(decoder);
+    return hr;
 }