Signed-off-by: Derek Lesho <dlesho@codeweavers.com>
---
v2:
- Use fixed list of supported formats in the videoconvert path instead of querying the template.
- Moved code setting "caps" appsink property to patch 4, where it begins to be used.
- Addressed some comments.
---
 dlls/winegstreamer/media_source.c | 124 ++++++++++++++++++++++++++----
 1 file changed, 110 insertions(+), 14 deletions(-)
diff --git a/dlls/winegstreamer/media_source.c b/dlls/winegstreamer/media_source.c
index 5f3c43a0204..cbee412030d 100644
--- a/dlls/winegstreamer/media_source.c
+++ b/dlls/winegstreamer/media_source.c
@@ -384,6 +384,43 @@ static const IMFMediaStreamVtbl media_stream_vtbl =
     media_stream_RequestSample
 };
 
+/* Setup a chain of elements which should hopefully allow transformations to any IMFMediaType
+   the user throws at us through gstreamer's caps negotiation. */
+static HRESULT media_stream_connect_to_sink(struct media_stream *stream)
+{
+    GstCaps *source_caps = gst_pad_query_caps(stream->their_src, NULL);
+    const gchar *stream_type;
+
+    if (!source_caps)
+        return E_FAIL;
+
+    stream_type = gst_structure_get_name(gst_caps_get_structure(source_caps, 0));
+    gst_caps_unref(source_caps);
+
+    if (!strcmp(stream_type, "video/x-raw"))
+    {
+        GstElement *videoconvert = gst_element_factory_make("videoconvert", NULL);
+
+        gst_bin_add(GST_BIN(stream->parent_source->container), videoconvert);
+
+        stream->my_sink = gst_element_get_static_pad(videoconvert, "sink");
+
+        if (!gst_element_link(videoconvert, stream->appsink))
+            return E_FAIL;
+
+        gst_element_sync_state_with_parent(videoconvert);
+    }
+    else
+    {
+        stream->my_sink = gst_element_get_static_pad(stream->appsink, "sink");
+    }
+
+    if (gst_pad_link(stream->their_src, stream->my_sink) != GST_PAD_LINK_OK)
+        return E_FAIL;
+
+    return S_OK;
+}
+
 static HRESULT new_media_stream(struct media_source *source, GstPad *pad, DWORD stream_id, struct media_stream **out_stream)
 {
     struct media_stream *object = heap_alloc_zero(sizeof(*object));
@@ -414,8 +451,8 @@ static HRESULT new_media_stream(struct media_source *source, GstPad *pad, DWORD
     g_object_set(object->appsink, "sync", FALSE, NULL);
     g_object_set(object->appsink, "max-buffers", 5, NULL);
 
-    object->my_sink = gst_element_get_static_pad(object->appsink, "sink");
-    gst_pad_link(object->their_src, object->my_sink);
+    if (FAILED(hr = media_stream_connect_to_sink(object)))
+        goto fail;
 
     gst_element_sync_state_with_parent(object->appsink);
 
@@ -435,28 +472,87 @@ static HRESULT media_stream_init_desc(struct media_stream *stream)
 {
     GstCaps *current_caps = gst_pad_get_current_caps(stream->their_src);
     IMFMediaTypeHandler *type_handler;
+    IMFMediaType **stream_types = NULL;
     IMFMediaType *stream_type = NULL;
+    DWORD type_count = 0;
+    const gchar *major_type;
+    unsigned int i;
     HRESULT hr;
 
-    stream_type = mf_media_type_from_caps(current_caps);
-    gst_caps_unref(current_caps);
-    if (!stream_type)
-        return E_FAIL;
+    major_type = gst_structure_get_name(gst_caps_get_structure(current_caps, 0));
+
+    if (!strcmp(major_type, "video/x-raw"))
+    {
+        /* These are the most common native output types of decoders:
+           https://docs.microsoft.com/en-us/windows/win32/medfound/mft-decoder-expose-o... */
+        static const GUID * video_types[] =
+        {
+            &MFVideoFormat_NV12,
+            &MFVideoFormat_YV12,
+            &MFVideoFormat_YUY2,
+            &MFVideoFormat_IYUV,
+            &MFVideoFormat_I420,
+        };
 
-    hr = MFCreateStreamDescriptor(stream->stream_id, 1, &stream_type, &stream->descriptor);
+        IMFMediaType *base_type = mf_media_type_from_caps(current_caps);
+        GUID base_subtype;
 
-    IMFMediaType_Release(stream_type);
+        IMFMediaType_GetGUID(base_type, &MF_MT_SUBTYPE, &base_subtype);
 
-    if (FAILED(hr))
-        return hr;
+        stream_types = heap_alloc( sizeof(IMFMediaType *) * ARRAY_SIZE(video_types) + 1);
 
-    if (FAILED(hr = IMFStreamDescriptor_GetMediaTypeHandler(stream->descriptor, &type_handler)))
-        return hr;
+        stream_types[0] = base_type;
+        type_count = 1;
 
-    hr = IMFMediaTypeHandler_SetCurrentMediaType(type_handler, stream_type);
+        for (i = 0; i < ARRAY_SIZE(video_types); i++)
+        {
+            IMFMediaType *new_type;
 
-    IMFMediaTypeHandler_Release(type_handler);
+            if (IsEqualGUID(&base_subtype, video_types[i]))
+                continue;
 
+            if (FAILED(hr = MFCreateMediaType(&new_type)))
+                goto done;
+            stream_types[type_count++] = new_type;
+
+            if (FAILED(hr = IMFMediaType_CopyAllItems(base_type, (IMFAttributes *) new_type)))
+                goto done;
+            if (FAILED(hr = IMFMediaType_SetGUID(new_type, &MF_MT_SUBTYPE, video_types[i])))
+                goto done;
+        }
+    }
+    else
+    {
+        stream_type = mf_media_type_from_caps(current_caps);
+        if (stream_type)
+        {
+            stream_types = &stream_type;
+            type_count = 1;
+        }
+    }
+
+    if (!type_count)
+    {
+        ERR("Failed to establish an IMFMediaType from any of the possible stream caps!\n");
+        return E_FAIL;
+    }
+
+    if (FAILED(hr = MFCreateStreamDescriptor(stream->stream_id, type_count, stream_types, &stream->descriptor)))
+        goto done;
+
+    if (FAILED(hr = IMFStreamDescriptor_GetMediaTypeHandler(stream->descriptor, &type_handler)))
+        goto done;
+
+    if (FAILED(hr = IMFMediaTypeHandler_SetCurrentMediaType(type_handler, stream_types[0])))
+        goto done;
+
+done:
+    gst_caps_unref(current_caps);
+    if (type_handler)
+        IMFMediaTypeHandler_Release(type_handler);
+    for (i = 0; i < type_count; i++)
+        IMFMediaType_Release(stream_types[i]);
+    if (stream_types != &stream_type)
+        heap_free(stream_types);
     return hr;
 }
Signed-off-by: Derek Lesho <dlesho@codeweavers.com>
---
v2: Addressed comment.
---
 dlls/winegstreamer/media_source.c | 13 +++++++++++++
 1 file changed, 13 insertions(+)
diff --git a/dlls/winegstreamer/media_source.c b/dlls/winegstreamer/media_source.c
index cbee412030d..a31fa1d6f6c 100644
--- a/dlls/winegstreamer/media_source.c
+++ b/dlls/winegstreamer/media_source.c
@@ -410,6 +410,19 @@ static HRESULT media_stream_connect_to_sink(struct media_stream *stream)
 
         gst_element_sync_state_with_parent(videoconvert);
     }
+    else if (!strcmp(stream_type, "audio/x-raw"))
+    {
+        GstElement *audioconvert = gst_element_factory_make("audioconvert", NULL);
+
+        gst_bin_add(GST_BIN(stream->parent_source->container), audioconvert);
+
+        stream->my_sink = gst_element_get_static_pad(audioconvert, "sink");
+
+        assert(gst_pad_link(stream->their_src, stream->my_sink) == GST_PAD_LINK_OK);
+        assert(gst_element_link(audioconvert, stream->appsink));
+
+        gst_element_sync_state_with_parent(audioconvert);
+    }
     else
     {
         stream->my_sink = gst_element_get_static_pad(stream->appsink, "sink");
On 10/6/20 10:59 AM, Derek Lesho wrote:
> Signed-off-by: Derek Lesho <dlesho@codeweavers.com>
> ---
> v2: Addressed comment.
> ---
>  dlls/winegstreamer/media_source.c | 13 +++++++++++++
>  1 file changed, 13 insertions(+)
>
> diff --git a/dlls/winegstreamer/media_source.c b/dlls/winegstreamer/media_source.c
> index cbee412030d..a31fa1d6f6c 100644
> --- a/dlls/winegstreamer/media_source.c
> +++ b/dlls/winegstreamer/media_source.c
> @@ -410,6 +410,19 @@ static HRESULT media_stream_connect_to_sink(struct media_stream *stream)
>  
>          gst_element_sync_state_with_parent(videoconvert);
>      }
> +    else if (!strcmp(stream_type, "audio/x-raw"))
> +    {
> +        GstElement *audioconvert = gst_element_factory_make("audioconvert", NULL);
> +
> +        gst_bin_add(GST_BIN(stream->parent_source->container), audioconvert);
> +
> +        stream->my_sink = gst_element_get_static_pad(audioconvert, "sink");
> +
> +        assert(gst_pad_link(stream->their_src, stream->my_sink) == GST_PAD_LINK_OK);

This is already done outside of the if/else block in patch 1/5 now.

> +        assert(gst_element_link(audioconvert, stream->appsink));

You use "assert" here, but return E_FAIL in the first patch.

> +
> +        gst_element_sync_state_with_parent(audioconvert);
> +    }
>      else
>      {
>          stream->my_sink = gst_element_get_static_pad(stream->appsink, "sink");
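Putting both comments together, the branch could look something like this (untested sketch: the gst_pad_link stays outside the if/else as in patch 1/5, and link failures return E_FAIL as in the videoconvert branch):

    else if (!strcmp(stream_type, "audio/x-raw"))
    {
        GstElement *audioconvert = gst_element_factory_make("audioconvert", NULL);

        gst_bin_add(GST_BIN(stream->parent_source->container), audioconvert);

        stream->my_sink = gst_element_get_static_pad(audioconvert, "sink");

        /* fail gracefully instead of assert(), matching the videoconvert path */
        if (!gst_element_link(audioconvert, stream->appsink))
            return E_FAIL;

        gst_element_sync_state_with_parent(audioconvert);
    }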
Signed-off-by: Derek Lesho <dlesho@codeweavers.com>
---
v2: Addressed comments.
---
 dlls/mf/tests/mf.c                |  1 -
 dlls/mfplat/tests/mfplat.c        | 10 +++++-----
 dlls/winegstreamer/media_source.c | 29 +++++++++++++++++++++++++++--
 3 files changed, 32 insertions(+), 8 deletions(-)
diff --git a/dlls/mf/tests/mf.c b/dlls/mf/tests/mf.c
index 0bc297cc69c..9c4d18478c6 100644
--- a/dlls/mf/tests/mf.c
+++ b/dlls/mf/tests/mf.c
@@ -1451,7 +1451,6 @@ todo_wine
         return;
 
     hr = IMFMediaSource_CreatePresentationDescriptor(source, &pd);
-todo_wine
     ok(hr == S_OK, "Failed to create descriptor, hr %#x.\n", hr);
     if (FAILED(hr))
         return;
diff --git a/dlls/mfplat/tests/mfplat.c b/dlls/mfplat/tests/mfplat.c
index 309f7b669a4..15d5bcba3d6 100644
--- a/dlls/mfplat/tests/mfplat.c
+++ b/dlls/mfplat/tests/mfplat.c
@@ -576,10 +576,7 @@ static void test_source_resolver(void)
     ok(obj_type == MF_OBJECT_MEDIASOURCE, "got %d\n", obj_type);
 
     hr = IMFMediaSource_CreatePresentationDescriptor(mediasource, &descriptor);
-todo_wine
     ok(hr == S_OK, "Failed to get presentation descriptor, hr %#x.\n", hr);
-    if (FAILED(hr))
-        goto skip_source_tests;
     ok(descriptor != NULL, "got %p\n", descriptor);
 
     hr = IMFPresentationDescriptor_GetStreamDescriptorByIndex(descriptor, 0, &selected, &sd);
@@ -599,6 +596,7 @@ todo_wine
     ok(hr == S_OK, "Failed to get current media type, hr %#x.\n", hr);
     hr = IMFMediaType_GetGUID(media_type, &MF_MT_SUBTYPE, &guid);
     ok(hr == S_OK, "Failed to get media sub type, hr %#x.\n", hr);
+todo_wine
     ok(IsEqualGUID(&guid, &MFVideoFormat_M4S2), "Unexpected sub type %s.\n", debugstr_guid(&guid));
     IMFMediaType_Release(media_type);
 
@@ -607,7 +605,10 @@ todo_wine
 
     var.vt = VT_EMPTY;
     hr = IMFMediaSource_Start(mediasource, descriptor, &GUID_NULL, &var);
+todo_wine
     ok(hr == S_OK, "Failed to start media source, hr %#x.\n", hr);
+    if (FAILED(hr))
+        goto skip_source_tests;
 
     get_event((IMFMediaEventGenerator *)mediasource, MENewStream, &var);
     ok(var.vt == VT_UNKNOWN, "Unexpected value type %u from MENewStream event.\n", var.vt);
@@ -670,11 +671,10 @@ todo_wine
 
     get_event((IMFMediaEventGenerator *)mediasource, MEEndOfPresentation, NULL);
 
+skip_source_tests:
     IMFMediaTypeHandler_Release(handler);
     IMFPresentationDescriptor_Release(descriptor);
 
-skip_source_tests:
-
     hr = IMFMediaSource_Shutdown(mediasource);
     ok(hr == S_OK, "Unexpected hr %#x.\n", hr);
 
diff --git a/dlls/winegstreamer/media_source.c b/dlls/winegstreamer/media_source.c
index a31fa1d6f6c..c70de184f0b 100644
--- a/dlls/winegstreamer/media_source.c
+++ b/dlls/winegstreamer/media_source.c
@@ -67,6 +67,7 @@ struct media_source
     IMFByteStream *byte_stream;
     struct media_stream **streams;
     ULONG stream_count;
+    IMFPresentationDescriptor *pres_desc;
     GstBus *bus;
     GstElement *container;
     GstElement *decodebin;
@@ -672,12 +673,12 @@ static HRESULT WINAPI media_source_CreatePresentationDescriptor(IMFMediaSource *
 {
     struct media_source *source = impl_from_IMFMediaSource(iface);
 
-    FIXME("(%p)->(%p): stub\n", source, descriptor);
+    TRACE("(%p)->(%p)\n", source, descriptor);
 
     if (source->state == SOURCE_SHUTDOWN)
         return MF_E_SHUTDOWN;
 
-    return E_NOTIMPL;
+    return IMFPresentationDescriptor_Clone(source->pres_desc, descriptor);
 }
 
 static HRESULT WINAPI media_source_Start(IMFMediaSource *iface, IMFPresentationDescriptor *descriptor,
@@ -740,6 +741,8 @@ static HRESULT WINAPI media_source_Shutdown(IMFMediaSource *iface)
     if (source->their_sink)
         gst_object_unref(GST_OBJECT(source->their_sink));
 
+    if (source->pres_desc)
+        IMFPresentationDescriptor_Release(source->pres_desc);
     if (source->event_queue)
         IMFMediaEventQueue_Shutdown(source->event_queue);
     if (source->byte_stream)
@@ -840,6 +843,7 @@ static HRESULT media_source_constructor(IMFByteStream *bytestream, struct media_
     GST_STATIC_PAD_TEMPLATE("mf_src", GST_PAD_SRC, GST_PAD_ALWAYS, GST_STATIC_CAPS_ANY);
 
     struct media_source *object = heap_alloc_zero(sizeof(*object));
+    IMFStreamDescriptor **descriptors = NULL;
     unsigned int i;
     HRESULT hr;
     int ret;
@@ -927,6 +931,25 @@ static HRESULT media_source_constructor(IMFByteStream *bytestream, struct media_
         gst_sample_unref(preroll);
     }
 
+    /* init presentation descriptor */
+
+    descriptors = heap_alloc(object->stream_count * sizeof(IMFStreamDescriptor*));
+    for (i = 0; i < object->stream_count; i++)
+    {
+        IMFMediaStream_GetStreamDescriptor(&object->streams[i]->IMFMediaStream_iface, &descriptors[i]);
+    }
+
+    if (FAILED(hr = MFCreatePresentationDescriptor(object->stream_count, descriptors, &object->pres_desc)))
+        goto fail;
+
+    for (i = 0; i < object->stream_count; i++)
+    {
+        IMFPresentationDescriptor_SelectStream(object->pres_desc, i);
+        IMFStreamDescriptor_Release(descriptors[i]);
+    }
+    heap_free(descriptors);
+    descriptors = NULL;
+
     object->state = SOURCE_STOPPED;
 
     *out_media_source = object;
@@ -935,6 +958,8 @@ static HRESULT media_source_constructor(IMFByteStream *bytestream, struct media_
 fail:
     WARN("Failed to construct MFMediaSource, hr %#x.\n", hr);
 
+    if (descriptors)
+        heap_free(descriptors);
     IMFMediaSource_Release(&object->IMFMediaSource_iface);
     return hr;
 }
Hi,
While running your changed tests, I think I found new failures. Being a bot and all I'm not very good at pattern recognition, so I might be wrong, but could you please double-check?
Full results can be found at: https://testbot.winehq.org/JobDetails.pl?Key=79962
Your paranoid android.
=== debiant (32 bit report) ===
mf: mf.c:1449: Test failed: Failed to create source, hr 0x80070057.
=== debiant (32 bit French report) ===
mf: mf.c:1449: Test failed: Failed to create source, hr 0x80070057.
=== debiant (32 bit Japanese:Japan report) ===
mf: mf.c:1449: Test failed: Failed to create source, hr 0x80070057.
=== debiant (32 bit Chinese:China report) ===
mfplat: mfplat.c:2698: Test failed: Unexpected counter value 0.
Unhandled exception: page fault on execute access to 0x0a2e7823 in 32-bit code (0x0a2e7823).
mf: mf.c:1449: Test failed: Failed to create source, hr 0x80070057.
Report validation errors: mfplat:mfplat crashed (c0000005)
=== debiant (32 bit WoW report) ===
mf: mf.c:1449: Test failed: Failed to create source, hr 0x80070057.
=== debiant (64 bit WoW report) ===
mf: mf.c:1449: Test failed: Failed to create source, hr 0x80070057.
On 10/6/20 10:59 AM, Derek Lesho wrote:
> Signed-off-by: Derek Lesho <dlesho@codeweavers.com>
> ---
> v2: Addressed comments.
> [...]
> @@ -935,6 +958,8 @@ static HRESULT media_source_constructor(IMFByteStream *bytestream, struct media_
>  fail:
>      WARN("Failed to construct MFMediaSource, hr %#x.\n", hr);
>  
> +    if (descriptors)
This
> +        heap_free(descriptors);
>      IMFMediaSource_Release(&object->IMFMediaSource_iface);
>      return hr;
>  }
On 10/6/20 9:12 PM, Zebediah Figura wrote:
> On 10/6/20 10:59 AM, Derek Lesho wrote:
>> @@ -935,6 +958,8 @@ static HRESULT media_source_constructor(IMFByteStream *bytestream, struct media_
>>  fail:
>>      WARN("Failed to construct MFMediaSource, hr %#x.\n", hr);
>>  
>> +    if (descriptors)
>
> This
What about this?
On 10/7/20 9:08 AM, Derek Lesho wrote:
> On 10/6/20 9:12 PM, Zebediah Figura wrote:
>> On 10/6/20 10:59 AM, Derek Lesho wrote:
>>> @@ -935,6 +958,8 @@ static HRESULT media_source_constructor(IMFByteStream *bytestream, struct media_
>>>  fail:
>>>      WARN("Failed to construct MFMediaSource, hr %#x.\n", hr);
>>>  
>>> +    if (descriptors)
>>
>> This
>
> What about this?
Apologies. What I was trying to say is that this condition is superfluous.
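That is, since heap_free() (like free()) tolerates a NULL pointer, the cleanup path can simply be (sketch):

    /* heap_free(NULL) is harmless, so no guard is needed */
    heap_free(descriptors);
    IMFMediaSource_Release(&object->IMFMediaSource_iface);
    return hr;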
Signed-off-by: Derek Lesho <dlesho@codeweavers.com>
---
v2:
- Squashed prior commit for IMFMediaType conversion in.
- Replaced usage of gst_bus_poll with better (not perfect) alternative.
- Addressed comments.
---
 dlls/mfplat/tests/mfplat.c        |   8 +-
 dlls/winegstreamer/gst_private.h  |   1 +
 dlls/winegstreamer/media_source.c | 316 +++++++++++++++++++++++++++++-
 dlls/winegstreamer/mfplat.c       | 149 ++++++++++++++
 4 files changed, 466 insertions(+), 8 deletions(-)
diff --git a/dlls/mfplat/tests/mfplat.c b/dlls/mfplat/tests/mfplat.c
index 15d5bcba3d6..bffce2bc114 100644
--- a/dlls/mfplat/tests/mfplat.c
+++ b/dlls/mfplat/tests/mfplat.c
@@ -605,10 +605,7 @@ todo_wine
 
     var.vt = VT_EMPTY;
     hr = IMFMediaSource_Start(mediasource, descriptor, &GUID_NULL, &var);
-todo_wine
     ok(hr == S_OK, "Failed to start media source, hr %#x.\n", hr);
-    if (FAILED(hr))
-        goto skip_source_tests;
 
     get_event((IMFMediaEventGenerator *)mediasource, MENewStream, &var);
     ok(var.vt == VT_UNKNOWN, "Unexpected value type %u from MENewStream event.\n", var.vt);
@@ -626,10 +623,13 @@ todo_wine
         hr = IMFMediaStream_RequestSample(video_stream, NULL);
         if (i == sample_count)
             break;
+todo_wine
         ok(hr == S_OK, "Failed to request sample %u, hr %#x.\n", i + 1, hr);
         if (hr != S_OK)
             break;
     }
+    if (FAILED(hr))
+        goto skip_source_tests;
 
     for (i = 0; i < sample_count; ++i)
     {
@@ -667,11 +667,11 @@ todo_wine
 
     hr = IMFMediaStream_RequestSample(video_stream, NULL);
     ok(hr == MF_E_END_OF_STREAM, "Unexpected hr %#x.\n", hr);
-    IMFMediaStream_Release(video_stream);
 
     get_event((IMFMediaEventGenerator *)mediasource, MEEndOfPresentation, NULL);
 
 skip_source_tests:
+    IMFMediaStream_Release(video_stream);
     IMFMediaTypeHandler_Release(handler);
     IMFPresentationDescriptor_Release(descriptor);
 
diff --git a/dlls/winegstreamer/gst_private.h b/dlls/winegstreamer/gst_private.h
index 60b38a48f5a..07556802a51 100644
--- a/dlls/winegstreamer/gst_private.h
+++ b/dlls/winegstreamer/gst_private.h
@@ -57,6 +57,7 @@ extern HRESULT mfplat_get_class_object(REFCLSID rclsid, REFIID riid, void **obj)
 
 HRESULT winegstreamer_stream_handler_create(REFIID riid, void **obj) DECLSPEC_HIDDEN;
 IMFMediaType *mf_media_type_from_caps(const GstCaps *caps) DECLSPEC_HIDDEN;
+GstCaps *caps_from_mf_media_type(IMFMediaType *type) DECLSPEC_HIDDEN;
 
 HRESULT winegstreamer_stream_handler_create(REFIID riid, void **obj) DECLSPEC_HIDDEN;
diff --git a/dlls/winegstreamer/media_source.c b/dlls/winegstreamer/media_source.c index c70de184f0b..5eb4465fea6 100644 --- a/dlls/winegstreamer/media_source.c +++ b/dlls/winegstreamer/media_source.c @@ -55,14 +55,39 @@ struct media_stream { STREAM_INACTIVE, STREAM_SHUTDOWN, + STREAM_RUNNING, } state; DWORD stream_id; + BOOL eos; +}; + +enum source_async_op +{ + SOURCE_ASYNC_START, +}; + +struct source_async_command +{ + IUnknown IUnknown_iface; + LONG refcount; + enum source_async_op op; + union + { + struct + { + IMFPresentationDescriptor *descriptor; + GUID format; + PROPVARIANT position; + } start; + } u; };
struct media_source { IMFMediaSource IMFMediaSource_iface; + IMFAsyncCallback async_commands_callback; LONG ref; + DWORD async_commands_queue; IMFMediaEventQueue *event_queue; IMFByteStream *byte_stream; struct media_stream **streams; @@ -76,6 +101,7 @@ struct media_source { SOURCE_OPENING, SOURCE_STOPPED, + SOURCE_RUNNING, SOURCE_SHUTDOWN, } state; HANDLE no_more_pads_event; @@ -91,7 +117,266 @@ static inline struct media_source *impl_from_IMFMediaSource(IMFMediaSource *ifac return CONTAINING_RECORD(iface, struct media_source, IMFMediaSource_iface); }
-static GstFlowReturn bytestream_wrapper_pull(GstPad *pad, GstObject *parent, guint64 ofs, guint len, +static inline struct media_source *impl_from_async_commands_callback_IMFAsyncCallback(IMFAsyncCallback *iface) +{ + return CONTAINING_RECORD(iface, struct media_source, async_commands_callback); +} + +static inline struct source_async_command *impl_from_async_command_IUnknown(IUnknown *iface) +{ + return CONTAINING_RECORD(iface, struct source_async_command, IUnknown_iface); +} + +static HRESULT WINAPI source_async_command_QueryInterface(IUnknown *iface, REFIID riid, void **obj) +{ + if (IsEqualIID(riid, &IID_IUnknown)) + { + *obj = iface; + IUnknown_AddRef(iface); + return S_OK; + } + + WARN("Unsupported interface %s.\n", debugstr_guid(riid)); + *obj = NULL; + return E_NOINTERFACE; +} + +static ULONG WINAPI source_async_command_AddRef(IUnknown *iface) +{ + struct source_async_command *command = impl_from_async_command_IUnknown(iface); + return InterlockedIncrement(&command->refcount); +} + +static ULONG WINAPI source_async_command_Release(IUnknown *iface) +{ + struct source_async_command *command = impl_from_async_command_IUnknown(iface); + ULONG refcount = InterlockedDecrement(&command->refcount); + + if (!refcount) + { + if (command->op == SOURCE_ASYNC_START) + PropVariantClear(&command->u.start.position); + heap_free(command); + } + + return refcount; +} + +static const IUnknownVtbl source_async_command_vtbl = +{ + source_async_command_QueryInterface, + source_async_command_AddRef, + source_async_command_Release, +}; + +static HRESULT source_create_async_op(enum source_async_op op, struct source_async_command **ret) +{ + struct source_async_command *command; + + if (!(command = heap_alloc_zero(sizeof(*command)))) + return E_OUTOFMEMORY; + + command->IUnknown_iface.lpVtbl = &source_async_command_vtbl; + command->op = op; + + *ret = command; + + return S_OK; +} + +static HRESULT WINAPI callback_QueryInterface(IMFAsyncCallback *iface, REFIID riid, void **obj) +{ + TRACE("%p, %s, %p.\n", iface, debugstr_guid(riid), obj); + + if (IsEqualIID(riid, &IID_IMFAsyncCallback) || + IsEqualIID(riid, &IID_IUnknown)) + { + *obj = iface; + IMFAsyncCallback_AddRef(iface); + return S_OK; + } + + WARN("Unsupported %s.\n", debugstr_guid(riid)); + *obj = NULL; + return E_NOINTERFACE; +} + +static HRESULT WINAPI callback_GetParameters(IMFAsyncCallback *iface, + DWORD *flags, DWORD *queue) +{ + return E_NOTIMPL; +} + +static ULONG WINAPI source_async_commands_callback_AddRef(IMFAsyncCallback *iface) +{ + struct media_source *source = impl_from_async_commands_callback_IMFAsyncCallback(iface); + return IMFMediaSource_AddRef(&source->IMFMediaSource_iface); +} + +static ULONG WINAPI source_async_commands_callback_Release(IMFAsyncCallback *iface) +{ + struct media_source *source = impl_from_async_commands_callback_IMFAsyncCallback(iface); + return IMFMediaSource_Release(&source->IMFMediaSource_iface); +} + +static IMFStreamDescriptor *stream_descriptor_from_id(IMFPresentationDescriptor *pres_desc, DWORD id, BOOL *selected) +{ + ULONG sd_count; + IMFStreamDescriptor *ret; + unsigned int i; + + if (FAILED(IMFPresentationDescriptor_GetStreamDescriptorCount(pres_desc, &sd_count))) + return NULL; + + for (i = 0; i < sd_count; i++) + { + DWORD stream_id; + + if (FAILED(IMFPresentationDescriptor_GetStreamDescriptorByIndex(pres_desc, i, selected, &ret))) + return NULL; + + if (SUCCEEDED(IMFStreamDescriptor_GetStreamIdentifier(ret, &stream_id)) && stream_id == id) + return ret; + + IMFStreamDescriptor_Release(ret); + } + 
return NULL; +} + +static void start_pipeline(struct media_source *source, struct source_async_command *command) +{ + PROPVARIANT *position = &command->u.start.position; + BOOL seek_message = source->state != SOURCE_STOPPED && position->vt != VT_EMPTY; + GstStateChangeReturn ret; + unsigned int i; + + gst_element_set_state(source->container, GST_STATE_PAUSED); + ret = gst_element_get_state(source->container, NULL, NULL, -1); + assert(ret == GST_STATE_CHANGE_SUCCESS); + + /* seek to beginning on stop->play */ + if (source->state == SOURCE_STOPPED && position->vt == VT_EMPTY) + { + position->vt = VT_I8; + position->u.hVal.QuadPart = 0; + } + + for (i = 0; i < source->stream_count; i++) + { + struct media_stream *stream; + IMFStreamDescriptor *sd; + IMFMediaTypeHandler *mth; + IMFMediaType *current_mt; + GstCaps *current_caps; + GstCaps *prev_caps; + DWORD stream_id; + BOOL was_active; + BOOL selected; + + stream = source->streams[i]; + + IMFStreamDescriptor_GetStreamIdentifier(stream->descriptor, &stream_id); + + sd = stream_descriptor_from_id(command->u.start.descriptor, stream_id, &selected); + IMFStreamDescriptor_Release(sd); + + was_active = stream->state != STREAM_INACTIVE; + + stream->state = selected ? STREAM_RUNNING : STREAM_INACTIVE; + + if (selected) + { + IMFStreamDescriptor_GetMediaTypeHandler(stream->descriptor, &mth); + IMFMediaTypeHandler_GetCurrentMediaType(mth, ¤t_mt); + current_caps = caps_from_mf_media_type(current_mt); + g_object_get(stream->appsink, "caps", &prev_caps, NULL); + if (!gst_caps_is_equal(prev_caps, current_caps)) + { + GstEvent *reconfigure_event = gst_event_new_reconfigure(); + g_object_set(stream->appsink, "caps", current_caps, NULL); + gst_pad_push_event(gst_element_get_static_pad(stream->appsink, "sink"), reconfigure_event); + } + + gst_caps_unref(current_caps); + gst_caps_unref(prev_caps); + IMFMediaType_Release(current_mt); + IMFMediaTypeHandler_Release(mth); + } + + if (position->vt != VT_EMPTY) + { + GstEvent *seek_event = gst_event_new_seek(1.0, GST_FORMAT_TIME, GST_SEEK_FLAG_FLUSH, + GST_SEEK_TYPE_SET, position->u.hVal.QuadPart / 100, GST_SEEK_TYPE_NONE, 0); + GstSample *preroll; + + gst_pad_push_event(stream->my_sink, seek_event); + + /* this works because the pre-seek preroll is already removed by media_source_constructor */ + g_signal_emit_by_name(stream->appsink, "pull-preroll", &preroll); + if (preroll) + gst_sample_unref(preroll); + + stream->eos = FALSE; + } + + if (selected) + { + TRACE("Stream %u (%p) selected\n", i, stream); + IMFMediaEventQueue_QueueEventParamUnk(source->event_queue, + was_active ? MEUpdatedStream : MENewStream, &GUID_NULL, + S_OK, (IUnknown*) &stream->IMFMediaStream_iface); + + IMFMediaEventQueue_QueueEventParamVar(stream->event_queue, + seek_message ? MEStreamSeeked : MEStreamStarted, &GUID_NULL, S_OK, position); + } + } + + IMFMediaEventQueue_QueueEventParamVar(source->event_queue, + seek_message ? 
MESourceSeeked : MESourceStarted, + &GUID_NULL, S_OK, position); + + source->state = SOURCE_RUNNING; + + gst_element_set_state(source->container, GST_STATE_PLAYING); + gst_element_get_state(source->container, NULL, NULL, -1); +} + +static HRESULT WINAPI source_async_commands_Invoke(IMFAsyncCallback *iface, IMFAsyncResult *result) +{ + struct media_source *source = impl_from_async_commands_callback_IMFAsyncCallback(iface); + struct source_async_command *command; + IUnknown *state; + HRESULT hr; + + if (source->state == SOURCE_SHUTDOWN) + return S_OK; + + if (FAILED(hr = IMFAsyncResult_GetState(result, &state))) + return hr; + + command = impl_from_async_command_IUnknown(state); + switch (command->op) + { + case SOURCE_ASYNC_START: + start_pipeline(source, command); + break; + } + + IUnknown_Release(state); + + return S_OK; +} + +static const IMFAsyncCallbackVtbl source_async_commands_callback_vtbl = +{ + callback_QueryInterface, + source_async_commands_callback_AddRef, + source_async_commands_callback_Release, + callback_GetParameters, + source_async_commands_Invoke, +}; + +GstFlowReturn bytestream_wrapper_pull(GstPad *pad, GstObject *parent, guint64 ofs, guint len, GstBuffer **buf) { struct media_source *source = gst_pad_get_element_private(pad); @@ -432,6 +717,8 @@ static HRESULT media_stream_connect_to_sink(struct media_stream *stream) if (gst_pad_link(stream->their_src, stream->my_sink) != GST_PAD_LINK_OK) return E_FAIL;
+    g_object_set(stream->appsink, "caps", source_caps, NULL);
+
     return S_OK;
 }
 
@@ -682,16 +969,30 @@ static HRESULT WINAPI media_source_CreatePresentationDescriptor(IMFMediaSource *
 }
 
 static HRESULT WINAPI media_source_Start(IMFMediaSource *iface, IMFPresentationDescriptor *descriptor,
-        const GUID *time_format, const PROPVARIANT *start_position)
+        const GUID *time_format, const PROPVARIANT *position)
 {
     struct media_source *source = impl_from_IMFMediaSource(iface);
+    struct source_async_command *command;
+    HRESULT hr;
 
-    FIXME("(%p)->(%p, %p, %p): stub\n", source, descriptor, time_format, start_position);
+    TRACE("(%p)->(%p, %p, %p)\n", source, descriptor, time_format, position);
 
     if (source->state == SOURCE_SHUTDOWN)
         return MF_E_SHUTDOWN;
 
-    return E_NOTIMPL;
+    if (!(IsEqualIID(time_format, &GUID_NULL)))
+        return MF_E_UNSUPPORTED_TIME_FORMAT;
+
+    if (SUCCEEDED(hr = source_create_async_op(SOURCE_ASYNC_START, &command)))
+    {
+        command->u.start.descriptor = descriptor;
+        command->u.start.format = *time_format;
+        PropVariantCopy(&command->u.start.position, position);
+
+        hr = MFPutWorkItem(source->async_commands_queue, &source->async_commands_callback, &command->IUnknown_iface);
+    }
+
+    return hr;
 }
 
 static HRESULT WINAPI media_source_Stop(IMFMediaSource *iface)
@@ -772,6 +1073,9 @@ static HRESULT WINAPI media_source_Shutdown(IMFMediaSource *iface)
     if (source->no_more_pads_event)
         CloseHandle(source->no_more_pads_event);
 
+    if (source->async_commands_queue)
+        MFUnlockWorkQueue(source->async_commands_queue);
+
     return S_OK;
 }
 
@@ -852,6 +1156,7 @@ static HRESULT media_source_constructor(IMFByteStream *bytestream, struct media_
         return E_OUTOFMEMORY;
 
     object->IMFMediaSource_iface.lpVtbl = &IMFMediaSource_vtbl;
+    object->async_commands_callback.lpVtbl = &source_async_commands_callback_vtbl;
     object->ref = 1;
     object->byte_stream = bytestream;
     IMFByteStream_AddRef(bytestream);
@@ -860,6 +1165,9 @@ static HRESULT media_source_constructor(IMFByteStream *bytestream, struct media_
     if (FAILED(hr = MFCreateEventQueue(&object->event_queue)))
         goto fail;
 
+    if (FAILED(hr = MFAllocateWorkQueue(&object->async_commands_queue)))
+        goto fail;
+
     object->container = gst_bin_new(NULL);
     object->bus = gst_bus_new();
     gst_bus_set_sync_handler(object->bus, mf_src_bus_watch_wrapper, object, NULL);
diff --git a/dlls/winegstreamer/mfplat.c b/dlls/winegstreamer/mfplat.c
index 2e8b0978648..9aa17ad00ab 100644
--- a/dlls/winegstreamer/mfplat.c
+++ b/dlls/winegstreamer/mfplat.c
@@ -601,3 +601,152 @@ IMFMediaType *mf_media_type_from_caps(const GstCaps *caps)
return media_type; } + +GstCaps *caps_from_mf_media_type(IMFMediaType *type) +{ + GUID major_type; + GUID subtype; + GstCaps *output = NULL; + + if (FAILED(IMFMediaType_GetMajorType(type, &major_type))) + return NULL; + if (FAILED(IMFMediaType_GetGUID(type, &MF_MT_SUBTYPE, &subtype))) + return NULL; + + if (IsEqualGUID(&major_type, &MFMediaType_Video)) + { + UINT64 frame_rate = 0, frame_size = 0; + DWORD width, height, framerate_num, framerate_den; + UINT32 unused; + + if (FAILED(IMFMediaType_GetUINT64(type, &MF_MT_FRAME_SIZE, &frame_size))) + return NULL; + width = frame_size >> 32; + height = frame_size; + if (FAILED(IMFMediaType_GetUINT64(type, &MF_MT_FRAME_RATE, &frame_rate))) + { + frame_rate = TRUE; + framerate_num = 0; + framerate_den = 1; + } + else + { + framerate_num = frame_rate >> 32; + framerate_den = frame_rate; + } + + /* Check if type is uncompressed */ + if (SUCCEEDED(MFCalculateImageSize(&subtype, 100, 100, &unused))) + { + GstVideoFormat format = GST_VIDEO_FORMAT_UNKNOWN; + unsigned int i; + + output = gst_caps_new_empty_simple("video/x-raw"); + + for (i = 0; i < ARRAY_SIZE(uncompressed_video_formats); i++) + { + if (IsEqualGUID(uncompressed_video_formats[i].subtype, &subtype)) + { + format = uncompressed_video_formats[i].format; + break; + } + } + + if (format == GST_VIDEO_FORMAT_UNKNOWN) + { + format = gst_video_format_from_fourcc(subtype.Data1); + } + + if (format == GST_VIDEO_FORMAT_UNKNOWN) + { + FIXME("Unrecognized format %s\n", debugstr_guid(&subtype)); + return NULL; + } + else + { + GstVideoInfo info; + + gst_video_info_set_format(&info, format, width, height); + output = gst_video_info_to_caps(&info); + } + + } + else { + FIXME("Unrecognized subtype %s\n", debugstr_guid(&subtype)); + return NULL; + } + + + if (frame_size) + { + gst_caps_set_simple(output, "width", G_TYPE_INT, width, NULL); + gst_caps_set_simple(output, "height", G_TYPE_INT, height, NULL); + } + if (frame_rate) + gst_caps_set_simple(output, "framerate", GST_TYPE_FRACTION, framerate_num, framerate_den, NULL); + return output; + } + else if (IsEqualGUID(&major_type, &MFMediaType_Audio)) + { + DWORD rate, channels, channel_mask, bitrate; + + if (IsEqualGUID(&subtype, &MFAudioFormat_Float)) + { + output = gst_caps_new_empty_simple("audio/x-raw"); + + gst_caps_set_simple(output, "format", G_TYPE_STRING, "F32LE", NULL); + gst_caps_set_simple(output, "layout", G_TYPE_STRING, "interleaved", NULL); + } + else if (IsEqualGUID(&subtype, &MFAudioFormat_PCM)) + { + DWORD bits_per_sample; + + if (SUCCEEDED(IMFMediaType_GetUINT32(type, &MF_MT_AUDIO_BITS_PER_SAMPLE, &bits_per_sample))) + { + char format[6]; + char type; + + type = bits_per_sample > 8 ? 'S' : 'U'; + + output = gst_caps_new_empty_simple("audio/x-raw"); + + sprintf(format, "%c%u%s", type, bits_per_sample, bits_per_sample > 8 ? 
"LE" : ""); + + gst_caps_set_simple(output, "format", G_TYPE_STRING, format, NULL); + } + else + { + ERR("Bits per sample not set.\n"); + return NULL; + } + } + else + { + FIXME("Unrecognized subtype %s\n", debugstr_guid(&subtype)); + return NULL; + } + + if (SUCCEEDED(IMFMediaType_GetUINT32(type, &MF_MT_AUDIO_SAMPLES_PER_SECOND, &rate))) + { + gst_caps_set_simple(output, "rate", G_TYPE_INT, rate, NULL); + } + if (SUCCEEDED(IMFMediaType_GetUINT32(type, &MF_MT_AUDIO_NUM_CHANNELS, &channels))) + { + gst_caps_set_simple(output, "channels", G_TYPE_INT, channels, NULL); + } + if (SUCCEEDED(IMFMediaType_GetUINT32(type, &MF_MT_AUDIO_CHANNEL_MASK, &channel_mask))) + { + gst_caps_set_simple(output, "channel-mask", GST_TYPE_BITMASK, (guint64) channel_mask, NULL); + } + + if (SUCCEEDED(IMFMediaType_GetUINT32(type, &MF_MT_AVG_BITRATE, &bitrate))) + { + gst_caps_set_simple(output, "bitrate", G_TYPE_INT, bitrate, NULL); + } + + return output; + } + + FIXME("Unrecognized major type %s\n", debugstr_guid(&major_type)); + return NULL; +}
Hi,
While running your changed tests, I think I found new failures. Being a bot and all I'm not very good at pattern recognition, so I might be wrong, but could you please double-check?
Full results can be found at: https://testbot.winehq.org/JobDetails.pl?Key=79963
Your paranoid android.
=== w10pro64_he (64 bit report) ===
mfplat: mfplat.c:2775: Test failed: Unexpected return value 0x102.
mfplat.c:2131: Test failed: Failed to get event, hr 0xc00d3e85.
mfplat.c:2134: Test failed: Failed to get event, hr 0xc00d3e85.
mfplat.c:2137: Test failed: Failed to finalize GetEvent, hr 0xc00d3e85.
mfplat.c:2140: Test failed: Unexpected result, hr 0xc00d3e85.
=== debiant (64 bit WoW report) ===
mfplat: Unhandled exception: page fault on read access to 0x00000018 in 32-bit code (0x7df66b56).
Report validation errors: mfplat:mfplat crashed (c0000005)
On 10/6/20 10:59 AM, Derek Lesho wrote:
> Signed-off-by: Derek Lesho <dlesho@codeweavers.com>
> ---
> v2:
> - Squashed prior commit for IMFMediaType conversion in.
> - Replaced usage of gst_bus_poll with better (not perfect) alternative.
> - Addressed comments.
> [...]
> +        if (position->vt != VT_EMPTY)
> +        {
> +            GstEvent *seek_event = gst_event_new_seek(1.0, GST_FORMAT_TIME, GST_SEEK_FLAG_FLUSH,
> +                    GST_SEEK_TYPE_SET, position->u.hVal.QuadPart / 100, GST_SEEK_TYPE_NONE, 0);
> +            GstSample *preroll;
> +
> +            gst_pad_push_event(stream->my_sink, seek_event);
> +
> +            /* this works because the pre-seek preroll is already removed by media_source_constructor */
> +            g_signal_emit_by_name(stream->appsink, "pull-preroll", &preroll);
> +            if (preroll)
> +                gst_sample_unref(preroll);
What is the point of doing this?
> +
> +            stream->eos = FALSE;
> +        }
> +
> +        if (selected)
> +        {
> +            TRACE("Stream %u (%p) selected\n", i, stream);
> +            IMFMediaEventQueue_QueueEventParamUnk(source->event_queue,
> +            was_active ? MEUpdatedStream : MENewStream, &GUID_NULL,
> +            S_OK, (IUnknown*) &stream->IMFMediaStream_iface);
Please apply at least some indentation to line continuations.
> +
> +            IMFMediaEventQueue_QueueEventParamVar(stream->event_queue,
> +                seek_message ? MEStreamSeeked : MEStreamStarted, &GUID_NULL, S_OK, position);
> +        }
> +    }
> +
> +    IMFMediaEventQueue_QueueEventParamVar(source->event_queue,
> +        seek_message ? MESourceSeeked : MESourceStarted,
> +        &GUID_NULL, S_OK, position);
> +
> +    source->state = SOURCE_RUNNING;
> +
> +    gst_element_set_state(source->container, GST_STATE_PLAYING);
> +    gst_element_get_state(source->container, NULL, NULL, -1);
And if this method is asynchronous, do you need this gst_element_get_state() call?
> +}
> [...]
> @@ -432,6 +717,8 @@ static HRESULT media_stream_connect_to_sink(struct media_stream *stream)
>      if (gst_pad_link(stream->their_src, stream->my_sink) != GST_PAD_LINK_OK)
>          return E_FAIL;
>  
> +    g_object_set(stream->appsink, "caps", source_caps, NULL);
Again, what's the point of this?
> +
>      return S_OK;
>  }
> [...]
>  static HRESULT WINAPI media_source_Start(IMFMediaSource *iface, IMFPresentationDescriptor *descriptor,
> -        const GUID *time_format, const PROPVARIANT *start_position)
> +        const GUID *time_format, const PROPVARIANT *position)
> [...]
> +    if (SUCCEEDED(hr = source_create_async_op(SOURCE_ASYNC_START, &command)))
> +    {
> +        command->u.start.descriptor = descriptor;
> +        command->u.start.format = *time_format;
> +        PropVariantCopy(&command->u.start.position, position);
> +
> +        hr = MFPutWorkItem(source->async_commands_queue, &source->async_commands_callback, &command->IUnknown_iface);
> +    }
There are likely mfplat peculiarities I'm not aware of here, but does this need to be asynchronous? I know that the documentation says "this method is asynchronous", but it's not immediately obvious to me that there isn't already a level of asynchronicity between this call and any of its effects.
> +
> +    return hr;
>  }
> [...]
> +GstCaps *caps_from_mf_media_type(IMFMediaType *type)
> +{
> [...]
> +    if (IsEqualGUID(&major_type, &MFMediaType_Video))
> +    {
> +        UINT64 frame_rate = 0, frame_size = 0;
> +        DWORD width, height, framerate_num, framerate_den;
> +        UINT32 unused;
> [...]
> +        /* Check if type is uncompressed */
> +        if (SUCCEEDED(MFCalculateImageSize(&subtype, 100, 100, &unused)))
Early return could save a level of indentation.

I also feel like there's an easier way to do this check. Maybe something like:

    for (i = 0; i < ARRAY_SIZE(uncompressed_video_formats); i++)
    {
        ...
    }

    if (format == GST_VIDEO_FORMAT_UNKNOWN && !memcmp(&subtype, &MFVideoFormat_Base.Data2, ...))
        format = gst_video_format_from_fourcc(subtype.Data1);

    if (format == GST_VIDEO_FORMAT_UNKNOWN)
    {
        FIXME("Unrecognized subtype %s.\n", debugstr_guid(&subtype));
        return NULL;
    }
> +        {
> +            GstVideoFormat format = GST_VIDEO_FORMAT_UNKNOWN;
> +            unsigned int i;
> +
> +            output = gst_caps_new_empty_simple("video/x-raw");
> +
> +            for (i = 0; i < ARRAY_SIZE(uncompressed_video_formats); i++)
> +            {
> +                if (IsEqualGUID(uncompressed_video_formats[i].subtype, &subtype))
> +                {
> +                    format = uncompressed_video_formats[i].format;
> +                    break;
> +                }
> +            }
> +
> +            if (format == GST_VIDEO_FORMAT_UNKNOWN)
> +            {
> +                format = gst_video_format_from_fourcc(subtype.Data1);
> +            }
> +
> +            if (format == GST_VIDEO_FORMAT_UNKNOWN)
> +            {
> +                FIXME("Unrecognized format %s\n", debugstr_guid(&subtype));
> +                return NULL;
> +            }
> +            else
This "else" is redundant.
> +            {
> +                GstVideoInfo info;
> +
> +                gst_video_info_set_format(&info, format, width, height);
> +                output = gst_video_info_to_caps(&info);
> +            }
> +        }
> +        else {
Inconsistent braces.
> +            FIXME("Unrecognized subtype %s\n", debugstr_guid(&subtype));
> +            return NULL;
> +        }
> +
> +        if (frame_size)
> +        {
> +            gst_caps_set_simple(output, "width", G_TYPE_INT, width, NULL);
> +            gst_caps_set_simple(output, "height", G_TYPE_INT, height, NULL);
> +        }
> +        if (frame_rate)
Why do you need to check this is nonzero? Can the frame rate really be 0/0?
For that matter, do we need to give GStreamer the framerate at all? It should only ever matter to sink elements (or those that respect a clock).
> +            gst_caps_set_simple(output, "framerate", GST_TYPE_FRACTION, framerate_num, framerate_den, NULL);
> +        return output;
> [...]
On 10/6/20 9:58 PM, Zebediah Figura wrote:
> On 10/6/20 10:59 AM, Derek Lesho wrote:
>> +        if (position->vt != VT_EMPTY)
>> +        {
>> +            GstEvent *seek_event = gst_event_new_seek(1.0, GST_FORMAT_TIME, GST_SEEK_FLAG_FLUSH,
>> +                    GST_SEEK_TYPE_SET, position->u.hVal.QuadPart / 100, GST_SEEK_TYPE_NONE, 0);
>> +            GstSample *preroll;
>> +
>> +            gst_pad_push_event(stream->my_sink, seek_event);
>> +
>> +            /* this works because the pre-seek preroll is already removed by media_source_constructor */
>> +            g_signal_emit_by_name(stream->appsink, "pull-preroll", &preroll);
>> +            if (preroll)
>> +                gst_sample_unref(preroll);
>
> What is the point of doing this?
To wait for the flush to complete. What is the point of waiting for the flush to complete? To ensure we don't send any MEMediaSample events from before the seek after MESourceSeeked.
IMFMediaEventQueue_QueueEventParamVar(stream->event_queue,
seek_message ? MEStreamSeeked : MEStreamStarted, &GUID_NULL, S_OK, position);
}
- }
- IMFMediaEventQueue_QueueEventParamVar(source->event_queue,
seek_message ? MESourceSeeked : MESourceStarted,
&GUID_NULL, S_OK, position);
- source->state = SOURCE_RUNNING;
- gst_element_set_state(source->container, GST_STATE_PLAYING);
- gst_element_get_state(source->container, NULL, NULL, -1);
And if this method is asynchronous, do you need this gst_element_get_state() call?
Probably not, but not because the method is asynchronous, but rather because it shouldn't affect how we interact with the appsink/s.
@@ -432,6 +717,8 @@ static HRESULT media_stream_connect_to_sink(struct media_stream *stream)
     if (gst_pad_link(stream->their_src, stream->my_sink) != GST_PAD_LINK_OK)
         return E_FAIL;
- g_object_set(stream->appsink, "caps", source_caps, NULL);
Again, what's the point of this?
To see whether the caps have changed since the prior selection in start_pipeline. If they have changed, we must send a reconfigure event.
- if (SUCCEEDED(hr = source_create_async_op(SOURCE_ASYNC_START, &command)))
- {
command->u.start.descriptor = descriptor;
command->u.start.format = *time_format;
PropVariantCopy(&command->u.start.position, position);
hr = MFPutWorkItem(source->async_commands_queue, &source->async_commands_callback, &command->IUnknown_iface);
- }
There are likely mfplat peculiarities I'm not aware of here, but does this need to be asynchronous? I know that the documentation says "this method is asynchronous", but it's not immediately obvious to me that there isn't already a level of asynchronicity between this call and any of its effects.
Well, I think that making the media source calls synchronous in their own thread simplifies things since you don't have to worry about blocking or synchronization with other calls. For example, ::RequestSample, if synchronous, would have to have two paths, one for popping signals from the appsink and one for issuing the request into some dedicated request queue. This was actually the implementation I had at first, but then I implemented the async reader callback with the same system, and Nikolay instead insisted that I use a command queue, which reduces complexity a lot. Then when adding seeking support, I switched to this approach for the media source. If nothing else, this system matches other media foundation components better (IMFMediaSession, IMFSourceReader), which reduces overhead for the reader.
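(For illustration, the command-queue shape is roughly the following — a sketch only; the op struct and the impl_async_op_from_IUnknown helper are illustrative stand-ins, not the patch's exact definitions:)

    enum source_async_op_type
    {
        SOURCE_ASYNC_START,
        SOURCE_ASYNC_STOP,
        SOURCE_ASYNC_REQUEST,
    };

    struct source_async_op
    {
        IUnknown IUnknown_iface;   /* refcounted payload handed to MFPutWorkItem() */
        LONG refcount;
        enum source_async_op_type type;
        /* union of per-command arguments: descriptor, position, stream, token, ... */
    };

    /* Every command lands here, one at a time, on the source's private work
     * queue, so the handlers never race each other and need no extra locking. */
    static HRESULT WINAPI source_async_commands_Invoke(IMFAsyncCallback *iface, IMFAsyncResult *result)
    {
        struct source_async_op *op;
        IUnknown *state;

        if (FAILED(IMFAsyncResult_GetState(result, &state)))
            return S_OK;

        op = impl_async_op_from_IUnknown(state);   /* illustrative helper */
        switch (op->type)
        {
            case SOURCE_ASYNC_START:   /* seek/flush, then queue MESourceStarted/MESourceSeeked */ break;
            case SOURCE_ASYNC_STOP:    /* ... */ break;
            case SOURCE_ASYNC_REQUEST: /* pull a sample from the appsink, queue MEMediaSample */ break;
        }

        IUnknown_Release(state);
        return S_OK;
    }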
+    return hr;
 }

 static HRESULT WINAPI media_source_Stop(IMFMediaSource *iface)
@@ -772,6 +1073,9 @@ static HRESULT WINAPI media_source_Shutdown(IMFMediaSource *iface)
     if (source->no_more_pads_event)
         CloseHandle(source->no_more_pads_event);
+    if (source->async_commands_queue)
+        MFUnlockWorkQueue(source->async_commands_queue);
+
     return S_OK;
 }

@@ -852,6 +1156,7 @@ static HRESULT media_source_constructor(IMFByteStream *bytestream, struct media_
         return E_OUTOFMEMORY;
object->IMFMediaSource_iface.lpVtbl = &IMFMediaSource_vtbl;
+    object->async_commands_callback.lpVtbl = &source_async_commands_callback_vtbl;
     object->ref = 1;
     object->byte_stream = bytestream;
     IMFByteStream_AddRef(bytestream);

@@ -860,6 +1165,9 @@ static HRESULT media_source_constructor(IMFByteStream *bytestream, struct media_
     if (FAILED(hr = MFCreateEventQueue(&object->event_queue)))
         goto fail;
+    if (FAILED(hr = MFAllocateWorkQueue(&object->async_commands_queue)))
+        goto fail;
+
     object->container = gst_bin_new(NULL);
     object->bus = gst_bus_new();
     gst_bus_set_sync_handler(object->bus, mf_src_bus_watch_wrapper, object, NULL);

diff --git a/dlls/winegstreamer/mfplat.c b/dlls/winegstreamer/mfplat.c
index 2e8b0978648..9aa17ad00ab 100644
--- a/dlls/winegstreamer/mfplat.c
+++ b/dlls/winegstreamer/mfplat.c
@@ -601,3 +601,152 @@ IMFMediaType *mf_media_type_from_caps(const GstCaps *caps)
return media_type;
}
+GstCaps *caps_from_mf_media_type(IMFMediaType *type)
+{
- GUID major_type;
- GUID subtype;
- GstCaps *output = NULL;
- if (FAILED(IMFMediaType_GetMajorType(type, &major_type)))
return NULL;
- if (FAILED(IMFMediaType_GetGUID(type, &MF_MT_SUBTYPE, &subtype)))
return NULL;
- if (IsEqualGUID(&major_type, &MFMediaType_Video))
- {
UINT64 frame_rate = 0, frame_size = 0;
DWORD width, height, framerate_num, framerate_den;
UINT32 unused;
if (FAILED(IMFMediaType_GetUINT64(type, &MF_MT_FRAME_SIZE, &frame_size)))
return NULL;
width = frame_size >> 32;
height = frame_size;
if (FAILED(IMFMediaType_GetUINT64(type, &MF_MT_FRAME_RATE, &frame_rate)))
{
frame_rate = TRUE;
framerate_num = 0;
framerate_den = 1;
}
else
{
framerate_num = frame_rate >> 32;
framerate_den = frame_rate;
}
/* Check if type is uncompressed */
if (SUCCEEDED(MFCalculateImageSize(&subtype, 100, 100, &unused)))
Early return could save a level of indentation.
Yes but in future patches there will be else statements for handling compressed types. Even if we don't end up using this for the media source, it will be necessary for games which manually use decoder MFTs.
I also feel like there's an easier way to do this check. Maybe something like:
for (i = 0; i < ARRAY_SIZE(uncompressed_video_formats); i++) { ... }
if (format == GST_VIDEO_FORMAT_UNKNOWN && !memcmp(&subtype, &MFVideoFormat_Base.Data2, ...)) format = gst_video_format_from_fourcc(subtype.Data1);
if (format == GST_VIDEO_FORMAT_UNKNOWN) { FIXME("Unrecognized subtype %s.\n", debugstr_guid(&subtype)); return NULL; }
That's certainly a possibility, but using MFCalculateImageSize is probably the best way to concisely determine whether we want to take the uncompressed video format route. With your solution, I guess we'd just add the handling for those formats in `if (format == GST_VIDEO_FORMAT_UNKNOWN)`. Either way is fine, which path should I take?
FIXME("Unrecognized subtype %s\n", debugstr_guid(&subtype));
return NULL;
}
if (frame_size)
{
gst_caps_set_simple(output, "width", G_TYPE_INT, width, NULL);
gst_caps_set_simple(output, "height", G_TYPE_INT, height, NULL);
}
if (frame_rate)
Why do you need to check this is nonzero? Can the frame rate really be 0/0?
To be honest, I don't remember exactly which circumstance led to this being necessary. I assume it was something with the IMFMediaType MK11 generates, but it does seem very strange so I'll have to check again.
For that matter, do we need to give GStreamer the framerate at all? It should only ever matter to sink elements (or those that respect a clock).
Why risk seeing what happens when you don't translate this cap; are there any benefits? Maybe a decoder would need it to handle some time-based seek?
On 10/7/20 9:58 AM, Derek Lesho wrote:
On 10/6/20 9:58 PM, Zebediah Figura wrote:
On 10/6/20 10:59 AM, Derek Lesho wrote:
if (position->vt != VT_EMPTY)
{
GstEvent *seek_event = gst_event_new_seek(1.0, GST_FORMAT_TIME, GST_SEEK_FLAG_FLUSH,
GST_SEEK_TYPE_SET, position->u.hVal.QuadPart / 100, GST_SEEK_TYPE_NONE, 0);
GstSample *preroll;
gst_pad_push_event(stream->my_sink, seek_event);
/* this works because the pre-seek preroll is already removed by media_source_constructor */
g_signal_emit_by_name(stream->appsink, "pull-preroll", &preroll);
if (preroll)
gst_sample_unref(preroll);
What is the point of doing this?
To wait for the flush to complete. What is the point of waiting for the flush to complete? To ensure we don't send any MEMediaSample events from before the seek after MESourceSeeked.
I don't think this is necessary; I don't think a flush can be asynchronous, nor a seek.
IMFMediaEventQueue_QueueEventParamVar(stream->event_queue,
seek_message ? MEStreamSeeked : MEStreamStarted, &GUID_NULL, S_OK, position);
}
- }
- IMFMediaEventQueue_QueueEventParamVar(source->event_queue,
seek_message ? MESourceSeeked : MESourceStarted,
&GUID_NULL, S_OK, position);
- source->state = SOURCE_RUNNING;
- gst_element_set_state(source->container, GST_STATE_PLAYING);
- gst_element_get_state(source->container, NULL, NULL, -1);
And if this method is asynchronous, do you need this gst_element_get_state() call?
Probably not, but not because the method is asynchronous, but rather because it shouldn't affect how we interact with the appsink/s.
@@ -432,6 +717,8 @@ static HRESULT media_stream_connect_to_sink(struct media_stream *stream)
     if (gst_pad_link(stream->their_src, stream->my_sink) != GST_PAD_LINK_OK)
         return E_FAIL;
- g_object_set(stream->appsink, "caps", source_caps, NULL);
Again, what's the point of this?
To see whether the caps have changed since the prior selection in start_pipeline. If they have changed, we must send a reconfigure event.
This might need clarification in the code, then; it looks redundant at first glance.
It may even be better to remove it. I have my doubts that it will make a difference in terms of performance; in particular I suspect that the caps GStreamer generates and the caps generated by caps_from_mf_media_type() will have subtle differences.
- if (SUCCEEDED(hr = source_create_async_op(SOURCE_ASYNC_START, &command)))
- {
command->u.start.descriptor = descriptor;
command->u.start.format = *time_format;
PropVariantCopy(&command->u.start.position, position);
hr = MFPutWorkItem(source->async_commands_queue, &source->async_commands_callback, &command->IUnknown_iface);
- }
There are likely mfplat peculiarities I'm not aware of here, but does this need to be asynchronous? I know that the documentation says "this method is asynchronous", but it's not immediately obvious to me that there isn't already a level of asynchronicity between this call and any of its effects.
Well, I think that making the media source calls synchronous in their own thread simplifies things since you don't have to worry about blocking or synchronization with other calls. For example, ::RequestSample, if synchronous, would have to have two paths, one for popping signals from the appsink and one for issuing the request into some dedicated request queue.
I don't think I understand this; can you please explain in more detail?
This was actually the implementation I had at first, but then I implemented the async reader callback with the same system, and Nikolay instead insisted that I use a command queue, which reduces complexity a lot. Then when adding seeking support, I switched to this approach for the media source. If nothing else, this system matches other media foundation components better (IMFMediaSession, IMFSourceReader), which reduces overhead for the reader.
Even if RequestSample() needs to be asynchronous, is there a reason that Start() needs to be asynchronous? Thread safety is obviously important, but that can be achieved with simple locking.
+    return hr;
 }

 static HRESULT WINAPI media_source_Stop(IMFMediaSource *iface)
@@ -772,6 +1073,9 @@ static HRESULT WINAPI media_source_Shutdown(IMFMediaSource *iface)
     if (source->no_more_pads_event)
         CloseHandle(source->no_more_pads_event);
+    if (source->async_commands_queue)
+        MFUnlockWorkQueue(source->async_commands_queue);
+
     return S_OK;
 }

@@ -852,6 +1156,7 @@ static HRESULT media_source_constructor(IMFByteStream *bytestream, struct media_
         return E_OUTOFMEMORY;
object->IMFMediaSource_iface.lpVtbl = &IMFMediaSource_vtbl;
+    object->async_commands_callback.lpVtbl = &source_async_commands_callback_vtbl;
     object->ref = 1;
     object->byte_stream = bytestream;
     IMFByteStream_AddRef(bytestream);

@@ -860,6 +1165,9 @@ static HRESULT media_source_constructor(IMFByteStream *bytestream, struct media_
     if (FAILED(hr = MFCreateEventQueue(&object->event_queue)))
         goto fail;
+    if (FAILED(hr = MFAllocateWorkQueue(&object->async_commands_queue)))
+        goto fail;
+
     object->container = gst_bin_new(NULL);
     object->bus = gst_bus_new();
     gst_bus_set_sync_handler(object->bus, mf_src_bus_watch_wrapper, object, NULL);

diff --git a/dlls/winegstreamer/mfplat.c b/dlls/winegstreamer/mfplat.c
index 2e8b0978648..9aa17ad00ab 100644
--- a/dlls/winegstreamer/mfplat.c
+++ b/dlls/winegstreamer/mfplat.c
@@ -601,3 +601,152 @@ IMFMediaType *mf_media_type_from_caps(const GstCaps *caps)
return media_type;
}
+GstCaps *caps_from_mf_media_type(IMFMediaType *type)
+{
- GUID major_type;
- GUID subtype;
- GstCaps *output = NULL;
- if (FAILED(IMFMediaType_GetMajorType(type, &major_type)))
return NULL;
- if (FAILED(IMFMediaType_GetGUID(type, &MF_MT_SUBTYPE, &subtype)))
return NULL;
- if (IsEqualGUID(&major_type, &MFMediaType_Video))
- {
UINT64 frame_rate = 0, frame_size = 0;
DWORD width, height, framerate_num, framerate_den;
UINT32 unused;
if (FAILED(IMFMediaType_GetUINT64(type, &MF_MT_FRAME_SIZE, &frame_size)))
return NULL;
width = frame_size >> 32;
height = frame_size;
if (FAILED(IMFMediaType_GetUINT64(type, &MF_MT_FRAME_RATE, &frame_rate)))
{
frame_rate = TRUE;
framerate_num = 0;
framerate_den = 1;
}
else
{
framerate_num = frame_rate >> 32;
framerate_den = frame_rate;
}
/* Check if type is uncompressed */
if (SUCCEEDED(MFCalculateImageSize(&subtype, 100, 100, &unused)))
Early return could save a level of indentation.
Yes but in future patches there will be else statements for handling compressed types. Even if we don't end up using this for the media source, it will be necessary for games which manually use decoder MFTs.
I also feel like there's an easier way to do this check. Maybe something like:
for (i = 0; i < ARRAY_SIZE(uncompressed_video_formats); i++) { ... }
if (format == GST_VIDEO_FORMAT_UNKNOWN && !memcmp(&subtype, &MFVideoFormat_Base.Data2, ...)) format = gst_video_format_from_fourcc(subtype.Data1);
if (format == GST_VIDEO_FORMAT_UNKNOWN) { FIXME("Unrecognized subtype %s.\n", debugstr_guid(&subtype)); return NULL; }
That's certainly a possibility, but using MFCalculateImageSize is probably the best way to concisely determine whether we want to take the uncompressed video format route. With your solution, I guess we'd just add the handling for those formats in `if (format == GST_VIDEO_FORMAT_UNKNOWN)`. Either way is fine, which path should I take?
I don't see any reason we need to check whether it's uncompressed in general; both "an uncompressed format we can't handle" and "a compressed format" yield the same result.
FIXME("Unrecognized subtype %s\n", debugstr_guid(&subtype));
return NULL;
}
if (frame_size)
{
gst_caps_set_simple(output, "width", G_TYPE_INT, width, NULL);
gst_caps_set_simple(output, "height", G_TYPE_INT, height, NULL);
}
if (frame_rate)
Why do you need to check this is nonzero? Can the frame rate really be 0/0?
To be honest, I don't remember exactly which circumstance led to this being necessary. I assume it was something with the IMFMediaType MK11 generates, but it does seem very strange so I'll have to check again.
For that matter, do we need to give GStreamer the framerate at all? It should only ever matter to sink elements (or those that respect a clock).
Why risk seeing what happens when you don't translate this cap; are there any benefits? Maybe a decoder would need it to handle some time-based seek?
In fact, translating the framerate in quartz has caused problems before, as applications didn't always provide it, or provided the wrong one, and thus the caps would not match; as a result we always remove it. On the other hand, I haven't seen any problems arise from clearing this field. In practice I don't think it should ever be necessary, because the parser provides the framerate.
On 10/7/20 1:53 PM, Zebediah Figura wrote:
On 10/7/20 9:58 AM, Derek Lesho wrote:
On 10/6/20 9:58 PM, Zebediah Figura wrote:
On 10/6/20 10:59 AM, Derek Lesho wrote:
if (position->vt != VT_EMPTY)
{
GstEvent *seek_event = gst_event_new_seek(1.0, GST_FORMAT_TIME, GST_SEEK_FLAG_FLUSH,
GST_SEEK_TYPE_SET, position->u.hVal.QuadPart / 100, GST_SEEK_TYPE_NONE, 0);
GstSample *preroll;
gst_pad_push_event(stream->my_sink, seek_event);
/* this works because the pre-seek preroll is already removed by media_source_constructor */
g_signal_emit_by_name(stream->appsink, "pull-preroll", &preroll);
if (preroll)
gst_sample_unref(preroll);
What is the point of doing this?
To wait for the flush to complete. What is the point of waiting for the flush to complete? To ensure we don't send any MEMediaSample events from before the seek after MESourceSeeked.
I don't think this is necessary; I don't think a flush can be asynchronous, nor a seek.
Interesting, so gst_pad_push_event blocks until the seek event is handled, but I'm not sure if this is before or after the flush start event has been propagated downstream, as what we're really waiting for is appsink to receive that and invalidate all buffered samples. I'll look into this and if just waiting for gst_pad_push_event to return suffices, I'll remove the hack.
IMFMediaEventQueue_QueueEventParamVar(stream->event_queue,
seek_message ? MEStreamSeeked : MEStreamStarted, &GUID_NULL, S_OK, position);
}
- }
- IMFMediaEventQueue_QueueEventParamVar(source->event_queue,
seek_message ? MESourceSeeked : MESourceStarted,
&GUID_NULL, S_OK, position);
- source->state = SOURCE_RUNNING;
- gst_element_set_state(source->container, GST_STATE_PLAYING);
- gst_element_get_state(source->container, NULL, NULL, -1);
And if this method is asynchronous, do you need this gst_element_get_state() call?
Probably not, but not because the method is asynchronous, but rather because it shouldn't affect how we interact with the appsink/s.
@@ -432,6 +717,8 @@ static HRESULT media_stream_connect_to_sink(struct media_stream *stream)
     if (gst_pad_link(stream->their_src, stream->my_sink) != GST_PAD_LINK_OK)
         return E_FAIL;
- g_object_set(stream->appsink, "caps", source_caps, NULL);
Again, what's the point of this?
To see whether the caps have changed since the prior selection in start_pipeline. If they have changed, we must send a reconfigure event.
This might need clarification in the code, then; it looks redundant at first glance.
It may even be better to remove it. I have my doubts that it will make a difference in terms of performance; in particular I suspect that the caps GStreamer generates and the caps generated by caps_from_mf_media_type() will have subtle differences.
That's true, I guess I'll remove it.
- if (SUCCEEDED(hr = source_create_async_op(SOURCE_ASYNC_START, &command)))
- {
command->u.start.descriptor = descriptor;
command->u.start.format = *time_format;
PropVariantCopy(&command->u.start.position, position);
hr = MFPutWorkItem(source->async_commands_queue, &source->async_commands_callback, &command->IUnknown_iface);
- }
There are likely mfplat peculiarities I'm not aware of here, but does this need to be asynchronous? I know that the documentation says "this method is asynchronous", but it's not immediately obvious to me that there isn't already a level of asynchronicity between this call and any of its effects.
Well, I think that making the media source calls synchronous in their own thread simplifies things since you don't have to worry about blocking or synchronization with other calls. For example, ::RequestSample, if synchronous, would have to have two paths, one for popping signals from the appsink and one for issuing the request into some dedicated request queue.
I don't think I understand this; can you please explain in more detail?
Yeah, in my first implementation, I set up a new-sample callback with the appsink, as well as a queue using a doubly linked list for sample requests. When ::RequestSample was called, if the appsink had any buffered samples, I would immediately dispatch the sample with the token sent in. Otherwise, I would insert a sample request into the queue and return. When the new-sample callback was run, I'd pop the request from the queue and send the event. In IMFMediaSource::Start. I still have the code for this lying around on my github [1]; it was quite an unnecessary mess.
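(Roughly, the two paths looked like this — a sketch with illustrative helper names (queue_token, dispatch_sample, the cs lock); the real code is on the branch in [1]:)

    /* Sketch of the earlier design: serve a buffered sample immediately,
     * otherwise park the token until the appsink's "new-sample" callback runs. */
    static HRESULT WINAPI media_stream_RequestSample(IMFMediaStream *iface, IUnknown *token)
    {
        struct media_stream *stream = impl_from_IMFMediaStream(iface);
        GstSample *sample = NULL;

        EnterCriticalSection(&stream->cs);   /* "cs" is illustrative */

        /* non-blocking pull; a zero timeout returns NULL if nothing is queued */
        g_signal_emit_by_name(stream->appsink, "try-pull-sample", (guint64)0, &sample);
        if (sample)
            dispatch_sample(stream, sample, token);   /* path 1: data already buffered */
        else
            queue_token(stream, token);               /* path 2: defer to new-sample */

        LeaveCriticalSection(&stream->cs);
        return S_OK;
    }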
This was actually the implementation I had at first, but then I implemented the async reader callback with the same system, and Nikolay instead insisted that I use a command queue, which reduces complexity a lot. Then when adding seeking support, I switched to this approach for the media source. If nothing else, this system matches other media foundation components better (IMFMediaSession, IMFSourceReader), which reduces overhead for the reader.
Even if RequestSample() needs to be asynchronous, is there a reason that Start() needs to be asynchronous? Thread safety is obviously important, but that can be achieved with simple locking.
One thing I want to avoid here is blocking too much during a function which is documented to be asynchronous. We have to wait for the media streams to finish up responding to requested samples in this function, which may take a non-trivial amount of time. I don't think it's unreasonable to expect there to be applications which call IMFMediaSource methods from their interactive thread, resulting in our blocking causing a reduction in responsiveness.
+    return hr;
 }

 static HRESULT WINAPI media_source_Stop(IMFMediaSource *iface)
@@ -772,6 +1073,9 @@ static HRESULT WINAPI media_source_Shutdown(IMFMediaSource *iface)
     if (source->no_more_pads_event)
         CloseHandle(source->no_more_pads_event);
+    if (source->async_commands_queue)
+        MFUnlockWorkQueue(source->async_commands_queue);
+
     return S_OK;
 }

@@ -852,6 +1156,7 @@ static HRESULT media_source_constructor(IMFByteStream *bytestream, struct media_
         return E_OUTOFMEMORY;
object->IMFMediaSource_iface.lpVtbl = &IMFMediaSource_vtbl;
+    object->async_commands_callback.lpVtbl = &source_async_commands_callback_vtbl;
     object->ref = 1;
     object->byte_stream = bytestream;
     IMFByteStream_AddRef(bytestream);

@@ -860,6 +1165,9 @@ static HRESULT media_source_constructor(IMFByteStream *bytestream, struct media_
     if (FAILED(hr = MFCreateEventQueue(&object->event_queue)))
         goto fail;
+    if (FAILED(hr = MFAllocateWorkQueue(&object->async_commands_queue)))
+        goto fail;
+
     object->container = gst_bin_new(NULL);
     object->bus = gst_bus_new();
     gst_bus_set_sync_handler(object->bus, mf_src_bus_watch_wrapper, object, NULL);

diff --git a/dlls/winegstreamer/mfplat.c b/dlls/winegstreamer/mfplat.c
index 2e8b0978648..9aa17ad00ab 100644
--- a/dlls/winegstreamer/mfplat.c
+++ b/dlls/winegstreamer/mfplat.c
@@ -601,3 +601,152 @@ IMFMediaType *mf_media_type_from_caps(const GstCaps *caps)
return media_type;
}
+GstCaps *caps_from_mf_media_type(IMFMediaType *type)
+{
- GUID major_type;
- GUID subtype;
- GstCaps *output = NULL;
- if (FAILED(IMFMediaType_GetMajorType(type, &major_type)))
return NULL;
- if (FAILED(IMFMediaType_GetGUID(type, &MF_MT_SUBTYPE, &subtype)))
return NULL;
- if (IsEqualGUID(&major_type, &MFMediaType_Video))
- {
UINT64 frame_rate = 0, frame_size = 0;
DWORD width, height, framerate_num, framerate_den;
UINT32 unused;
if (FAILED(IMFMediaType_GetUINT64(type, &MF_MT_FRAME_SIZE, &frame_size)))
return NULL;
width = frame_size >> 32;
height = frame_size;
if (FAILED(IMFMediaType_GetUINT64(type, &MF_MT_FRAME_RATE, &frame_rate)))
{
frame_rate = TRUE;
framerate_num = 0;
framerate_den = 1;
}
else
{
framerate_num = frame_rate >> 32;
framerate_den = frame_rate;
}
/* Check if type is uncompressed */
if (SUCCEEDED(MFCalculateImageSize(&subtype, 100, 100, &unused)))
Early return could save a level of indentation.
Yes but in future patches there will be else statements for handling compressed types. Even if we don't end up using this for the media source, it will be necessary for games which manually use decoder MFTs.
I also feel like there's an easier way to do this check. Maybe something like:
for (i = 0; i < ARRAY_SIZE(uncompressed_video_formats); i++) { ... }
if (format == GST_VIDEO_FORMAT_UNKNOWN && !memcmp(&subtype, &MFVideoFormat_Base.Data2, ...)) format = gst_video_format_from_fourcc(subtype.Data1);
if (format == GST_VIDEO_FORMAT_UNKNOWN) { FIXME("Unrecognized subtype %s.\n", debugstr_guid(&subtype)); return NULL; }
That's certainly a possibility, but using MFCalculateImageSize is probably the best way to concisely determine whether we want to take the uncompressed video format route. With your solution, I guess we'd just add the handling for those formats in `if (format == GST_VIDEO_FORMAT_UNKNOWN)`. Either way is fine, which path should I take?
I don't see any reason we need to check whether it's uncompressed in general; both "an uncompressed format we can't handle" and "a compressed format" yield the same result.
Right now they do, but my point is that in future patches the structure is more like
if (uncompressed) { ... } else if (format == h264) { ... } else if (format == wmv)
FIXME("Unrecognized subtype %s\n", debugstr_guid(&subtype));
return NULL;
}
if (frame_size)
{
gst_caps_set_simple(output, "width", G_TYPE_INT, width, NULL);
gst_caps_set_simple(output, "height", G_TYPE_INT, height, NULL);
}
if (frame_rate)
Why do you need to check this is nonzero? Can the frame rate really be 0/0?
To be honest, I don't remember exactly which circumstance led to this being necessary. I assume it was something with the IMFMediaType MK11 generates, but it does seem very strange so I'll have to check again.
For that matter, do we need to give GStreamer the framerate at all? It should only ever matter to sink elements (or those that respect a clock).
Why risk seeing what happens when you don't translate this cap; are there any benefits? Maybe a decoder would need it to handle some time-based seek?
In fact, translating the framerate in quartz has caused problems before, as applications didn't always provide it, or provided the wrong one, and thus the caps would not match; as a result we always remove it. On the other hand, I haven't seen any problems arise from clearing this field. In practice I don't think it should ever be necessary, because the parser provides the framerate.
My feeling is that such a workaround should be put in place where it causes problems, not in a general conversion function which may be used (as in later patches) for other purposes where the cap is necessary.

For what it's worth, the chance of a media foundation application feeding some bogus framerate is slim, as they usually just search the media type handler for a desired type anyway, and we're supposed to fail on SetCurrentMediaType if the media type they enter is unsupported.
1: https://github.com/Guy1524/wine/blob/mfplat/dlls/winegstreamer/media_source....
On 10/7/20 3:19 PM, Derek Lesho wrote:
On 10/7/20 1:53 PM, Zebediah Figura wrote:
On 10/7/20 9:58 AM, Derek Lesho wrote:
On 10/6/20 9:58 PM, Zebediah Figura wrote:
On 10/6/20 10:59 AM, Derek Lesho wrote:
+            if (position->vt != VT_EMPTY)
+            {
+                GstEvent *seek_event = gst_event_new_seek(1.0, GST_FORMAT_TIME, GST_SEEK_FLAG_FLUSH,
+                        GST_SEEK_TYPE_SET, position->u.hVal.QuadPart / 100, GST_SEEK_TYPE_NONE, 0);
+                GstSample *preroll;
+
+                gst_pad_push_event(stream->my_sink, seek_event);
+
+                /* this works because the pre-seek preroll is already removed by media_source_constructor */
+                g_signal_emit_by_name(stream->appsink, "pull-preroll", &preroll);
+                if (preroll)
+                    gst_sample_unref(preroll);
What is the point of doing this?
To wait for the flush to complete. What is the point of waiting for the flush to complete? To ensure we don't send any MEMediaSample events from before the seek after MESourceSeeked.
I don't think this is necessary; I don't think a flush can be asynchronous, nor a seek.
Interesting, so gst_pad_push_event blocks until the seek event is handled, but I'm not sure if this is before or after the flush start event has been propagated downstream, as what we're really waiting for is appsink to receive that and invalidate all buffered samples. I'll look into this and if just waiting for gst_pad_push_event to return suffices, I'll remove the hack.
A flushing seek sends both FLUSH_START and FLUSH_STOP events downstream.
Moreover, on further examination, why do you need to wait for this at all? The flushing seek explicitly guarantees that stale samples will be discarded; even if the event delivery were itself asynchronous, you're not sending samples as soon as you get them, but rather only when the application requests them, so as far as I can see the MEStreamSeeked (&c.) events sent by this function will still be serialized with respect to MEMediaSample events.
In fact, another thing: doesn't that mean you need to somehow flush the work queue here?
+                IMFMediaEventQueue_QueueEventParamVar(stream->event_queue,
+                        seek_message ? MEStreamSeeked : MEStreamStarted, &GUID_NULL, S_OK, position);
+            }
+        }
+
+        IMFMediaEventQueue_QueueEventParamVar(source->event_queue,
+                seek_message ? MESourceSeeked : MESourceStarted,
+                &GUID_NULL, S_OK, position);
+
+        source->state = SOURCE_RUNNING;
+
+        gst_element_set_state(source->container, GST_STATE_PLAYING);
+        gst_element_get_state(source->container, NULL, NULL, -1);
And if this method is asynchronous, do you need this gst_element_get_state() call?
Probably not, but not because the method is asynchronous, but rather because it shouldn't affect how we interact with the appsink/s.
@@ -432,6 +717,8 @@ static HRESULT media_stream_connect_to_sink(struct media_stream *stream)
     if (gst_pad_link(stream->their_src, stream->my_sink) != GST_PAD_LINK_OK)
         return E_FAIL;
+    g_object_set(stream->appsink, "caps", source_caps, NULL);
+
Again, what's the point of this?
To see whether the caps have changed since the prior selection in start_pipeline. If they have changed, we must send a reconfigure event.
This might need clarification in the code, then; it looks redundant at first glance.
It may even be better to remove it. I have my doubts that it will make a difference in terms of performance; in particular I suspect that the caps GStreamer generates and the caps generated by caps_from_mf_media_type() will have subtle differences.
That's true, I guess I'll remove it.
+    if (SUCCEEDED(hr = source_create_async_op(SOURCE_ASYNC_START, &command)))
+    {
+        command->u.start.descriptor = descriptor;
+        command->u.start.format = *time_format;
+        PropVariantCopy(&command->u.start.position, position);
+
+        hr = MFPutWorkItem(source->async_commands_queue, &source->async_commands_callback, &command->IUnknown_iface);
+    }
There are likely mfplat peculiarities I'm not aware of here, but does this need to be asynchronous? I know that the documentation says "this method is asynchronous", but it's not immediately obvious to me that there isn't already a level of asynchronicity between this call and any of its effects.
Well, I think that making the media source calls synchronous in their own thread simplifies things since you don't have to worry about blocking or synchronization with other calls. For example, ::RequestSample, if synchronous, would have to have two paths, one for popping signals from the appsink and one for issuing the request into some dedicated request queue.
I don't think I understand this; can you please explain in more detail?
Yeah, in my first implementation, I set up a new-sample callback with the appsink, as well as a queue using a doubly linked list for sample requests. When ::RequestSample was called, if the appsink had any buffered samples, I would immediately dispatch the sample with the token sent in. Otherwise, I would insert a sample request into the queue and return. When the new-sample callback was run, I'd pop the request from the queue and send the event. In IMFMediaSource::Start. I still have the code for this lying around on my github [1]; it was quite an unnecessary mess.
Okay, the point then is that IMFMediaStream::RequestSample() is supposed to always return immediately?
This was actually the implementation I had at first, but then I implemented the async reader callback with the same system, and Nikolay instead insisted that I use a command queue, which reduces complexity a lot. Then when adding seeking support, I switched to this approach for the media source. If nothing else, this system matches other media foundation components better (IMFMediaSession, IMFSourceReader), which reduces overhead for the reader.
Even if RequestSample() needs to be asynchronous, is there a reason that Start() needs to be asynchronous? Thread safety is obviously important, but that can be achieved with simple locking.
One thing I want to avoid here is blocking too much during a function which is documented to be asynchronous. We have to wait for the media streams to finish up responding to requested samples in this function, which may take a non-trivial amount of time. I don't think it's unreasonable to expect there to be applications which call IMFMediaSource methods from their interactive thread, resulting in our blocking causing a reduction in responsiveness.
This strikes me as premature optimization. But if you're really concerned, the best way to resolve this is in fact to measure the time this function takes.
+    return hr;
 }

 static HRESULT WINAPI media_source_Stop(IMFMediaSource *iface)
@@ -772,6 +1073,9 @@ static HRESULT WINAPI media_source_Shutdown(IMFMediaSource *iface)
     if (source->no_more_pads_event)
         CloseHandle(source->no_more_pads_event);
+    if (source->async_commands_queue)
+        MFUnlockWorkQueue(source->async_commands_queue);
+
     return S_OK;
 }

@@ -852,6 +1156,7 @@ static HRESULT media_source_constructor(IMFByteStream *bytestream, struct media_
         return E_OUTOFMEMORY;

     object->IMFMediaSource_iface.lpVtbl = &IMFMediaSource_vtbl;
+    object->async_commands_callback.lpVtbl = &source_async_commands_callback_vtbl;
     object->ref = 1;
     object->byte_stream = bytestream;
     IMFByteStream_AddRef(bytestream);
@@ -860,6 +1165,9 @@ static HRESULT media_source_constructor(IMFByteStream *bytestream, struct media_
     if (FAILED(hr = MFCreateEventQueue(&object->event_queue)))
         goto fail;
+    if (FAILED(hr = MFAllocateWorkQueue(&object->async_commands_queue)))
+        goto fail;

     object->container = gst_bin_new(NULL);
     object->bus = gst_bus_new();
     gst_bus_set_sync_handler(object->bus, mf_src_bus_watch_wrapper, object, NULL);

diff --git a/dlls/winegstreamer/mfplat.c b/dlls/winegstreamer/mfplat.c
index 2e8b0978648..9aa17ad00ab 100644
--- a/dlls/winegstreamer/mfplat.c
+++ b/dlls/winegstreamer/mfplat.c
@@ -601,3 +601,152 @@ IMFMediaType *mf_media_type_from_caps(const GstCaps *caps)
     return media_type;
 }
+GstCaps *caps_from_mf_media_type(IMFMediaType *type)
+{
+    GUID major_type;
+    GUID subtype;
+    GstCaps *output = NULL;
+
+    if (FAILED(IMFMediaType_GetMajorType(type, &major_type)))
+        return NULL;
+    if (FAILED(IMFMediaType_GetGUID(type, &MF_MT_SUBTYPE, &subtype)))
+        return NULL;
+
+    if (IsEqualGUID(&major_type, &MFMediaType_Video))
+    {
+        UINT64 frame_rate = 0, frame_size = 0;
+        DWORD width, height, framerate_num, framerate_den;
+        UINT32 unused;
+
+        if (FAILED(IMFMediaType_GetUINT64(type, &MF_MT_FRAME_SIZE, &frame_size)))
+            return NULL;
+        width = frame_size >> 32;
+        height = frame_size;
+        if (FAILED(IMFMediaType_GetUINT64(type, &MF_MT_FRAME_RATE, &frame_rate)))
+        {
+            frame_rate = TRUE;
+            framerate_num = 0;
+            framerate_den = 1;
+        }
+        else
+        {
+            framerate_num = frame_rate >> 32;
+            framerate_den = frame_rate;
+        }
+
+        /* Check if type is uncompressed */
+        if (SUCCEEDED(MFCalculateImageSize(&subtype, 100, 100, &unused)))
Early return could save a level of indentation.
Yes but in future patches there will be else statements for handling compressed types. Even if we don't end up using this for the media source, it will be necessary for games which manually use decoder MFTs.
I also feel like there's an easier way to do this check. Maybe something like:
for (i = 0; i < ARRAY_SIZE(uncompressed_video_formats); i++) { ... }
if (format == GST_VIDEO_FORMAT_UNKNOWN && !memcmp(&subtype, &MFVideoFormat_Base.Data2, ...)) format = gst_video_format_from_fourcc(subtype.Data1);
if (format == GST_VIDEO_FORMAT_UNKNOWN) { FIXME("Unrecognized subtype %s.\n", debugstr_guid(&subtype)); return NULL; }
That's certainly a possibility, but using MFCalculateImageSize is probably the best way to concisely determine whether we want to take the uncompressed video format route. With your solution, I guess we'd just add the handling for those formats in `if (format == GST_VIDEO_FORMAT_UNKNOWN)`. Either way is fine, which path should I take?
I don't see any reason we need to check whether it's uncompressed in general; both "an uncompressed format we can't handle" and "a compressed format" yield the same result.
Right now they do, but my point is that in future patches the structure is more like
if (uncompressed) { ... } else if (format == h264) { ... } else if (format == wmv)
Right, I don't see why there's any point doing that when you can instead just leave the uncompressed case as the default, e.g.
if (IsEqualGUID(&subtype, &some_compressed_format))
    ...
else if (IsEqualGUID(&subtype, &some_other_compressed_format))
    ...
else
{
    for (i = 0; i < ARRAY_SIZE(uncompressed_video_formats); ++i)
        ...
    if (format == GST_VIDEO_FORMAT_UNKNOWN)
        ...
    if (format == GST_VIDEO_FORMAT_UNKNOWN)
        return NULL;
}
+ FIXME("Unrecognized subtype %s\n", debugstr_guid(&subtype)); + return NULL; + }
+ if (frame_size) + { + gst_caps_set_simple(output, "width", G_TYPE_INT, width, NULL); + gst_caps_set_simple(output, "height", G_TYPE_INT, height, NULL); + } + if (frame_rate)
Why do you need to check this is nonzero? Can the frame rate really be 0/0?
To be honest, I don't remember exactly which circumstance led to this being necessary. I assume it was something with the IMFMediaType MK11 generates, but it does seem very strange so I'll have to check again.
For that matter, do we need to give GStreamer the framerate at all? It should only ever matter to sink elements (or those that respect a clock).
Why risk seeing what happens when you don't translate this cap; are there any benefits? Maybe a decoder would need it to handle some time-based seek?
In fact, translating the framerate in quartz has caused problems before, as applications didn't always provide it, or provided the wrong one, and thus the caps would not match; as a result we always remove it. On the other hand, I haven't seen any problems arise from clearing this field. In practice I don't think it should ever be necessary, because the parser provides the framerate.
My feeling is that such a workaround should be put in place where it causes problems, not in a general conversion function which may be used (as in later patches) for other purposes where the cap is necessary.
I don't think either translating the frame rate or not translating the frame rate is really incorrect, such that one can be called a workaround. And one is clearly more work than the other. [Note that gstdemux explicitly clears the framerate because it uses gst_video_info_to_caps() instead of building the caps structure manually.]
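(For reference, the gstdemux-style removal is a one-liner on freshly built caps — a sketch, assuming info was filled in from the media type; gst_structure_remove_field() is the stock GStreamer call:)

    GstVideoInfo info;
    GstCaps *caps;

    gst_video_info_set_format(&info, format, width, height);
    caps = gst_video_info_to_caps(&info);
    /* the freshly created caps are writable, so the field can just be dropped */
    gst_structure_remove_field(gst_caps_get_structure(caps, 0), "framerate");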
For what it's worth, the chance of a media foundation application feeding some bogus framerate is slim, as they usually just search the media type handler for a desired type anyway, and we're supposed to fail on SetCurrentMediaType if the media type they enter is unsupported.

1: https://github.com/Guy1524/wine/blob/mfplat/dlls/winegstreamer/media_source....
1: https://github.com/Guy1524/wine/blob/mfplat/dlls/winegstreamer/media_source....
On 10/8/20 11:33 AM, Zebediah Figura wrote:
On 10/7/20 3:19 PM, Derek Lesho wrote:
On 10/7/20 1:53 PM, Zebediah Figura wrote:
On 10/7/20 9:58 AM, Derek Lesho wrote:
On 10/6/20 9:58 PM, Zebediah Figura wrote:
On 10/6/20 10:59 AM, Derek Lesho wrote:
+            if (position->vt != VT_EMPTY)
+            {
+                GstEvent *seek_event = gst_event_new_seek(1.0, GST_FORMAT_TIME, GST_SEEK_FLAG_FLUSH,
+                        GST_SEEK_TYPE_SET, position->u.hVal.QuadPart / 100, GST_SEEK_TYPE_NONE, 0);
+                GstSample *preroll;
+
+                gst_pad_push_event(stream->my_sink, seek_event);
+
+                /* this works because the pre-seek preroll is already removed by media_source_constructor */
+                g_signal_emit_by_name(stream->appsink, "pull-preroll", &preroll);
+                if (preroll)
+                    gst_sample_unref(preroll);
What is the point of doing this?
To wait for the flush to complete. What is the point of waiting for the flush to complete? To ensure we don't send any MEMediaSample events from before the seek after MESourceSeeked.
I don't think this is necessary; I don't think a flush can be asynchronous, nor a seek.
Interesting, so gst_pad_push_event blocks until the seek event is handled, but I'm not sure if this is before or after the flush start event has been propagated downstream, as what we're really waiting for is appsink to receive that and invalidate all buffered samples. I'll look into this and if just waiting for gst_pad_push_event to return suffices, I'll remove the hack.
A flushing seek sends both FLUSH_START and FLUSH_STOP events downstream.
Yes
Moreover, on further examination, why do you need to wait for this at all? The flushing seek explicitly guarantees that stale samples will be discarded; even if the event delivery were itself asynchronous, you're not sending samples as soon as you get them, but rather only when the application requests them, so as far as I can see the MEStreamSeeked (&c.) events sent by this function will still be serialized with respect to MEMediaSample events.
Yes, my only concern would be the case where we send the seek event, then proceed to a ::RequestSample call, which then calls pull-sample before the flush-start event reaches the appsink. This would cause a pre-seek buffer from the appsink's queue to be returned.
In fact, another thing: doesn't that mean you need to somehow flush the work queue here?
This function is in step with the work queue, so all previous sample requests will have been fulfilled, as is documented in the MSDN:

After *Start* is called, each stream on the media source must do one of the following:

- Deliver media data in response to IMFMediaStream::RequestSample (https://docs.microsoft.com/en-us/windows/desktop/api/mfidl/nf-mfidl-imfmediastream-requestsample) calls.
- Send MEStreamTick (https://docs.microsoft.com/en-us/windows/desktop/medfound/mestreamtick) events to indicate a gap in the stream.
- Send an MEEndOfStream (https://docs.microsoft.com/en-us/windows/desktop/medfound/meendofstream) event to indicate the end of the stream.
+                IMFMediaEventQueue_QueueEventParamVar(stream->event_queue,
+                        seek_message ? MEStreamSeeked : MEStreamStarted, &GUID_NULL, S_OK, position);
+            }
+        }
+
+        IMFMediaEventQueue_QueueEventParamVar(source->event_queue,
+                seek_message ? MESourceSeeked : MESourceStarted,
+                &GUID_NULL, S_OK, position);
+
+        source->state = SOURCE_RUNNING;
+
+        gst_element_set_state(source->container, GST_STATE_PLAYING);
+        gst_element_get_state(source->container, NULL, NULL, -1);
And if this method is asynchronous, do you need this gst_element_get_state() call?
Probably not, but not because the method is asynchronous, but rather because it shouldn't affect how we interact with the appsink/s.
@@ -432,6 +717,8 @@ static HRESULT media_stream_connect_to_sink(struct media_stream *stream)
     if (gst_pad_link(stream->their_src, stream->my_sink) != GST_PAD_LINK_OK)
         return E_FAIL;
+    g_object_set(stream->appsink, "caps", source_caps, NULL);
+
Again, what's the point of this?
To see whether the caps have changed since the prior selection in start_pipeline. If they have changed, we must send a reconfigure event.
This might need clarification in the code, then; it looks redundant at first glance.
It may even be better to remove it. I have my doubts that it will make a difference in terms of performance; in particular I suspect that the caps GStreamer generates and the caps generated by caps_from_mf_media_type() will have subtle differences.
That's true, I guess I'll remove it.
+    if (SUCCEEDED(hr = source_create_async_op(SOURCE_ASYNC_START, &command)))
+    {
+        command->u.start.descriptor = descriptor;
+        command->u.start.format = *time_format;
+        PropVariantCopy(&command->u.start.position, position);
+
+        hr = MFPutWorkItem(source->async_commands_queue, &source->async_commands_callback, &command->IUnknown_iface);
+    }
There are likely mfplat peculiarities I'm not aware of here, but does this need to be asynchronous? I know that the documentation says "this method is asynchronous", but it's not immediately obvious to me that there isn't already a level of asynchronicity between this call and any of its effects.
Well, I think that making the media source calls synchronous in their own thread simplifies things since you don't have to worry about blocking or synchronization with other calls. For example, ::RequestSample, if synchronous, would have to have two paths, one for popping signals from the appsink and one for issuing the request into some dedicated request queue.
I don't think I understand this; can you please explain in more detail?
Yeah, in my first implementation, I set up a new-sample callback with the appsink, as well as a queue using a doubly linked list for sample requests. When ::RequestSample was called, if the appsink had any buffered samples, I would immediately dispatch the sample with the token sent in. Otherwise, I would insert a sample request into the queue and return. When the new-sample callback was run, I'd pop the request from the queue and send the event. In IMFMediaSource::Start. I still have the code for this lying around on my github [1]; it was quite an unnecessary mess.
Okay, the point then is that IMFMediaStream::RequestSample() is supposed to always return immediately?
Yes, it is supposed to. One thing I failed to mention in my previous email (the incomplete "In IMFMediaSource::Start." sentence) is that we also require code for draining the ::RequestSample queue in ::Start, again increasing complexity.
This was actually the implementation I had at first, but then I implemented the async reader callback with the same system, and Nikolay instead insisted that I use a command queue, which reduces complexity a lot. Then when adding seeking support, I switched to this approach for the media source. If nothing else, this system matches other media foundation components better (IMFMediaSession, IMFSourceReader), which reduces overhead for the reader.
Even if RequestSample() needs to be asynchronous, is there a reason that Start() needs to be asynchronous? Thread safety is obviously important, but that can be achieved with simple locking.
One thing I want to avoid here is blocking too much during a function which is documented to be asynchronous. We have to wait for the media streams to finish up responding to requested samples in this function, which may take a non-trivial amount of time. I don't think it's unreasonable to expect there to be applications which call IMFMediaSource methods from their interactive thread, resulting in our blocking causing a reduction in responsiveness.
This strikes me as premature optimization. But if you're really concerned, the best way to resolve this is in fact to measure the time this function takes.
The time it takes depends on multiple factors: whether it's a seek or a start; which demuxer, decoders, and elements are hooked up; and mainly how many ::RequestSample requests are queued at the time of the call. Plus, it's not the only reason to keep it asynchronous as documented: as mentioned earlier, it better aligns with other media foundation classes and reduces code complexity.
+    return hr;
 }

 static HRESULT WINAPI media_source_Stop(IMFMediaSource *iface)
@@ -772,6 +1073,9 @@ static HRESULT WINAPI media_source_Shutdown(IMFMediaSource *iface)
     if (source->no_more_pads_event)
         CloseHandle(source->no_more_pads_event);
+    if (source->async_commands_queue)
+        MFUnlockWorkQueue(source->async_commands_queue);
+
     return S_OK;
 }

@@ -852,6 +1156,7 @@ static HRESULT media_source_constructor(IMFByteStream *bytestream, struct media_
         return E_OUTOFMEMORY;

     object->IMFMediaSource_iface.lpVtbl = &IMFMediaSource_vtbl;
+    object->async_commands_callback.lpVtbl = &source_async_commands_callback_vtbl;
     object->ref = 1;
     object->byte_stream = bytestream;
     IMFByteStream_AddRef(bytestream);
@@ -860,6 +1165,9 @@ static HRESULT media_source_constructor(IMFByteStream *bytestream, struct media_
     if (FAILED(hr = MFCreateEventQueue(&object->event_queue)))
         goto fail;
+    if (FAILED(hr = MFAllocateWorkQueue(&object->async_commands_queue)))
+        goto fail;

     object->container = gst_bin_new(NULL);
     object->bus = gst_bus_new();
     gst_bus_set_sync_handler(object->bus, mf_src_bus_watch_wrapper, object, NULL);

diff --git a/dlls/winegstreamer/mfplat.c b/dlls/winegstreamer/mfplat.c
index 2e8b0978648..9aa17ad00ab 100644
--- a/dlls/winegstreamer/mfplat.c
+++ b/dlls/winegstreamer/mfplat.c
@@ -601,3 +601,152 @@ IMFMediaType *mf_media_type_from_caps(const GstCaps *caps)
     return media_type;
 }

+GstCaps *caps_from_mf_media_type(IMFMediaType *type)
+{
+    GUID major_type;
+    GUID subtype;
+    GstCaps *output = NULL;
+
+    if (FAILED(IMFMediaType_GetMajorType(type, &major_type)))
+        return NULL;
+    if (FAILED(IMFMediaType_GetGUID(type, &MF_MT_SUBTYPE, &subtype)))
+        return NULL;
+
+    if (IsEqualGUID(&major_type, &MFMediaType_Video))
+    {
+        UINT64 frame_rate = 0, frame_size = 0;
+        DWORD width, height, framerate_num, framerate_den;
+        UINT32 unused;
+
+        if (FAILED(IMFMediaType_GetUINT64(type, &MF_MT_FRAME_SIZE, &frame_size)))
+            return NULL;
+        width = frame_size >> 32;
+        height = frame_size;
+        if (FAILED(IMFMediaType_GetUINT64(type, &MF_MT_FRAME_RATE, &frame_rate)))
+        {
+            frame_rate = TRUE;
+            framerate_num = 0;
+            framerate_den = 1;
+        }
+        else
+        {
+            framerate_num = frame_rate >> 32;
+            framerate_den = frame_rate;
+        }
+
+        /* Check if type is uncompressed */
+        if (SUCCEEDED(MFCalculateImageSize(&subtype, 100, 100, &unused)))
Early return could save a level of indentation.
Yes but in future patches there will be else statements for handling compressed types. Even if we don't end up using this for the media source, it will be necessary for games which manually use decoder MFTs.
I also feel like there's an easier way to do this check. Maybe something like:
for (i = 0; i < ARRAY_SIZE(uncompressed_video_formats); i++) { ... }
if (format == GST_VIDEO_FORMAT_UNKNOWN && !memcmp(&subtype, &MFVideoFormat_Base.Data2, ...)) format = gst_video_format_from_fourcc(subtype.Data1);
if (format == GST_VIDEO_FORMAT_UNKNOWN) { FIXME("Unrecognized subtype %s.\n", debugstr_guid(&subtype)); return NULL; }
That's certainly a possibility, but using MFCalculateImageSize is probably the best way to concisely determine whether we want to take the uncompressed video format route. With your solution, I guess we'd just add the handling for those formats in `if (format == GST_VIDEO_FORMAT_UNKNOWN)`. Either way is fine, which path should I take?
I don't see any reason we need to check whether it's uncompressed in general; both "an uncompressed format we can't handle" and "a compressed format" yield the same result.
Right now they do, but my point is that in future patches the structure is more like
if (uncompressed) { ... } else if (format == h264) { ... } else if (format == wmv)
Right, I don't see why there's any point doing that when you can instead just leave the uncompressed case as the default, e.g.
if (IsEqualGUID(&subtype, &some_compressed_format))
    ...
else if (IsEqualGUID(&subtype, &some_other_compressed_format))
    ...
else
{
    for (i = 0; i < ARRAY_SIZE(uncompressed_video_formats); ++i)
        ...
    if (format == GST_VIDEO_FORMAT_UNKNOWN)
        ...
    if (format == GST_VIDEO_FORMAT_UNKNOWN)
        return NULL;
}
Sounds good
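(Spelled out, that shape would be something like the following — MFVideoFormat_H264 here is just a placeholder for the compressed branches of the later patches; uncompressed_video_formats and the Data2 comparison against MFVideoFormat_Base follow the suggestion above:)

    GstVideoFormat format = GST_VIDEO_FORMAT_UNKNOWN;
    unsigned int i;

    if (IsEqualGUID(&subtype, &MFVideoFormat_H264))
    {
        /* dedicated compressed branch, added by a later patch */
    }
    else
    {
        /* default: treat the subtype as uncompressed video */
        for (i = 0; i < ARRAY_SIZE(uncompressed_video_formats); ++i)
        {
            if (IsEqualGUID(uncompressed_video_formats[i].subtype, &subtype))
            {
                format = uncompressed_video_formats[i].format;
                break;
            }
        }

        /* FOURCC-based subtypes share everything but Data1 with MFVideoFormat_Base */
        if (format == GST_VIDEO_FORMAT_UNKNOWN
                && !memcmp(&subtype.Data2, &MFVideoFormat_Base.Data2, sizeof(subtype) - sizeof(subtype.Data1)))
            format = gst_video_format_from_fourcc(subtype.Data1);

        if (format == GST_VIDEO_FORMAT_UNKNOWN)
        {
            FIXME("Unrecognized subtype %s.\n", debugstr_guid(&subtype));
            return NULL;
        }
    }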
+ FIXME("Unrecognized subtype %s\n", debugstr_guid(&subtype)); + return NULL; + }
+ if (frame_size) + { + gst_caps_set_simple(output, "width", G_TYPE_INT, width, NULL); + gst_caps_set_simple(output, "height", G_TYPE_INT, height, NULL); + } + if (frame_rate)
Why do you need to check this is nonzero? Can the frame rate really be 0/0?
To be honest, I don't remember exactly which circumstance led to this being necessary. I assume it was something with the IMFMediaType MK11 generates, but it does seem very strange so I'll have to check again.
For that matter, do we need to give GStreamer the framerate at all? It should only ever matter to sink elements (or those that respect a clock).
Why risk seeing what happens when you don't translate this cap; are there any benefits? Maybe a decoder would need it to handle some time-based seek?
In fact, translating the framerate in quartz has caused problems before, as applications didn't always provide it, or provided the wrong one, and thus the caps would not match; as a result we always remove it. On the other hand, I haven't seen any problems arise from clearing this field. In practice I don't think it should ever be necessary, because the parser provides the framerate.
My feeling is that such a workaround should be put in place where it causes problems, not in a general conversion function which may be used (as in later patches) for other purposes where the cap is necessary.
I don't think either translating the frame rate or not translating the frame rate is really incorrect, such that one can be called a workaround. And one is clearly more work than the other. [Note that gstdemux explicitly clears the framerate because it uses gst_video_info_to_caps() instead of building the caps structure manually.]
In quartz, you have a static function which is only used within the context of your equivalent of the media source. This definition resides inside mfplat.c, where it will also be used by the decoder, and in the future, encoders (some games use the encoders of media foundation to save highlights to disk, and streaming services use it for obvious reasons). I find it very likely that an encoder would want to know the framerate of an incoming stream.
For what it's worth, the chance of a media foundation application feeding some bogus framerate is slim, as they usually just search the media type handler for a desired type anyway, and we're supposed to fail on SetCurrentMediaType if the media type they enter is unsupported.

1: https://github.com/Guy1524/wine/blob/mfplat/dlls/winegstreamer/media_source....
On 10/8/20 12:06 PM, Derek Lesho wrote:
On 10/8/20 11:33 AM, Zebediah Figura wrote:
On 10/7/20 3:19 PM, Derek Lesho wrote:
On 10/7/20 1:53 PM, Zebediah Figura wrote:
On 10/7/20 9:58 AM, Derek Lesho wrote:
On 10/6/20 9:58 PM, Zebediah Figura wrote:
On 10/6/20 10:59 AM, Derek Lesho wrote:
> +            if (position->vt != VT_EMPTY)
> +            {
> +                GstEvent *seek_event = gst_event_new_seek(1.0, GST_FORMAT_TIME, GST_SEEK_FLAG_FLUSH,
> +                        GST_SEEK_TYPE_SET, position->u.hVal.QuadPart / 100, GST_SEEK_TYPE_NONE, 0);
> +                GstSample *preroll;
> +
> +                gst_pad_push_event(stream->my_sink, seek_event);
> +
> +                /* this works because the pre-seek preroll is already removed by media_source_constructor */
> +                g_signal_emit_by_name(stream->appsink, "pull-preroll", &preroll);
> +                if (preroll)
> +                    gst_sample_unref(preroll);

What is the point of doing this?
To wait for the flush to complete. What is the point of waiting for the flush to complete? To ensure we don't send any MEMediaSample events from before the seek after MESourceSeeked.
I don't think this is necessary; I don't think a flush can be asynchronous, nor a seek.
Interesting, so gst_pad_push_event blocks until the seek event is handled, but I'm not sure if this is before or after the flush start event has been propagated downstream, as what we're really waiting for is appsink to receive that and invalidate all buffered samples. I'll look into this and if just waiting for gst_pad_push_event to return suffices, I'll remove the hack.
A flushing seek sends both FLUSH_START and FLUSH_STOP events downstream.
Yes
Moreover, on further examination, why do you need to wait for this at all? The flushing seek explicitly guarantees that stale samples will be discarded; even if the event delivery were itself asynchronous, you're not sending samples as soon as you get them, but rather only when the application requests them, so as far as I can see the MEStreamSeeked (&c.) events sent by this function will still be serialized with respect to MEMediaSample events.
Yes, my only concern would be the case where we send the seek event, then proceed to a ::RequestSample call, which then calls pull-sample before the flush-start event reaches the appsink. This would cause a pre-seek buffer from the appsink's queue to be returned.
Right, I don't think that can happen; the seek has to complete before gst_pad_push_event() returns.
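(Under that assumption the hack reduces to just the push — a minimal sketch, reusing the patch's variables:)

    GstEvent *seek_event = gst_event_new_seek(1.0, GST_FORMAT_TIME, GST_SEEK_FLAG_FLUSH,
            GST_SEEK_TYPE_SET, position->u.hVal.QuadPart / 100, GST_SEEK_TYPE_NONE, 0);

    /* gst_pad_push_event() only returns once the FLUSH_START/FLUSH_STOP pair
     * has run, so appsink has already dropped its pre-seek samples here. */
    if (!gst_pad_push_event(stream->my_sink, seek_event))
        WARN("Failed to push seek event.\n");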
In fact, another thing: doesn't that mean you need to somehow flush the work queue here?
This function is in step with the work queue, so all previous sample requests will have been fulfilled, as is documented in the MSDN:
After *Start* is called, each stream on the media source must do one of the following:
- Deliver media data in response to IMFMediaStream::RequestSample (https://docs.microsoft.com/en-us/windows/desktop/api/mfidl/nf-mfidl-imfmediastream-requestsample) calls.
- Send MEStreamTick (https://docs.microsoft.com/en-us/windows/desktop/medfound/mestreamtick) events to indicate a gap in the stream.
- Send an MEEndOfStream (https://docs.microsoft.com/en-us/windows/desktop/medfound/meendofstream) event to indicate the end of the stream.
Okay, that's a good reason to make Start() asynchronous, then. I don't know if that deserves better communication in the code.
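(Sketching how that might be communicated in the code itself — the comment wording is illustrative, but source_create_async_op and MFPutWorkItem are the helpers from the patch:)

    /* IMFMediaSource::Start() is documented to be asynchronous, and the streams
       must keep servicing earlier RequestSample() calls in the meantime, so the
       actual state change is serialized onto the source's command queue rather
       than performed here. */
    if (SUCCEEDED(hr = source_create_async_op(SOURCE_ASYNC_START, &command)))
        hr = MFPutWorkItem(source->async_commands_queue,
                &source->async_commands_callback, &command->IUnknown_iface);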
> +
> +            IMFMediaEventQueue_QueueEventParamVar(stream->event_queue,
> +                    seek_message ? MEStreamSeeked : MEStreamStarted, &GUID_NULL, S_OK, position);
> +        }
> +    }
> +
> +    IMFMediaEventQueue_QueueEventParamVar(source->event_queue,
> +            seek_message ? MESourceSeeked : MESourceStarted, &GUID_NULL, S_OK, position);
> +
> +    source->state = SOURCE_RUNNING;
> +
> +    gst_element_set_state(source->container, GST_STATE_PLAYING);
> +    gst_element_get_state(source->container, NULL, NULL, -1);

And if this method is asynchronous, do you need this gst_element_get_state() call?
Probably not, though not because the method is asynchronous; rather, it shouldn't affect how we interact with the appsink(s).
> @@ -432,6 +717,8 @@ static HRESULT media_stream_connect_to_sink(struct media_stream *stream)
>      if (gst_pad_link(stream->their_src, stream->my_sink) != GST_PAD_LINK_OK)
>          return E_FAIL;
> +    g_object_set(stream->appsink, "caps", source_caps, NULL);
> +

Again, what's the point of this?
To see whether the caps have changed since the prior selection in start_pipeline. If they have changed, we must send a reconfigure event.
This might need clarification in the code, then; it looks redundant at first glance.
It may even be better to remove it. I have my doubts that it will make a difference in terms of performance; in particular I suspect that the caps GStreamer generates and the caps generated by caps_from_mf_media_type() will have subtle differences.
That's true, I guess I'll remove it.
> +    if (SUCCEEDED(hr = source_create_async_op(SOURCE_ASYNC_START, &command)))
> +    {
> +        command->u.start.descriptor = descriptor;
> +        command->u.start.format = *time_format;
> +        PropVariantCopy(&command->u.start.position, position);
> +
> +        hr = MFPutWorkItem(source->async_commands_queue, &source->async_commands_callback, &command->IUnknown_iface);
> +    }

There are likely mfplat peculiarities I'm not aware of here, but does this need to be asynchronous? I know that the documentation says "this method is asynchronous", but it's not immediately obvious to me that there isn't already a level of asynchronicity between this call and any of its effects.
Well, I think that making the media source calls synchronous in their own thread simplifies things since you don't have to worry about blocking or synchronization with other calls. For example, ::RequestSample, if synchronous, would have to have two paths, one for popping signals from the appsink and one for issuing the request into some dedicated request queue.
I don't think I understand this; can you please explain in more detail?
Yeah, in my first implementation, I set up a new-sample callback with the appsink, as well as a queue using a doubly linked list for sample requests. When ::RequestSample was called, if the appsink had any buffered samples, I would immediately dispatch the sample with the token sent in. Otherwise, I would insert a sample request into the queue and return. When the new-sample callback was run, I'd pop the request from the queue and send the event. In IMFMediaSource::Start. I still have the code for this lying around on my GitHub [1], it was quite an unnecessary mess.
Okay, the point then is that IMFMediaStream::RequestSample() is supposed to always return immediately?
Yes, it is supposed to. One thing I failed to mention in my previous email (the incomplete "In IMFMediaSource::Start." sentence) is that we also require code for draining the ::RequestSample queue in ::Start, again increasing complexity.
This was actually the implementation I had at first, but then I implemented the async reader callback with the same system, and Nikolay instead insisted that I use a command queue, which reduces complexity a lot. Then when adding seeking support, I switched to this approach for the media source. If nothing else, this system matches other media foundation components better (IMFMediaSession, IMFSourceReader), which reduces overhead for the reader.
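(For contrast, a rough reconstruction of what that first design's ::RequestSample would have looked like; the cs lock and the dispatch_sample and queue_request helpers are hypothetical names for illustration, not code from any of the patches:)

    static HRESULT WINAPI media_stream_RequestSample(IMFMediaStream *iface, IUnknown *token)
    {
        struct media_stream *stream = impl_from_IMFMediaStream(iface);
        GstSample *sample;

        EnterCriticalSection(&stream->cs);
        /* Path 1: the appsink already has a buffered sample; dispatch it now. */
        g_signal_emit_by_name(stream->appsink, "try-pull-sample", (GstClockTime)0, &sample);
        if (sample)
            dispatch_sample(stream, sample, token);
        else
            /* Path 2: park the request until the new-sample callback fires. */
            queue_request(&stream->request_queue, token);
        LeaveCriticalSection(&stream->cs);

        return S_OK;
    }

With the command queue, both paths collapse into a single MFPutWorkItem() call.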
Even if RequestSample() needs to be asynchronous, is there a reason that Start() needs to be asynchronous? Thread safety is obviously important, but that can be achieved with simple locking.
One thing I want to avoid here is blocking too much in a function which is documented to be asynchronous. We have to wait for the media streams to finish responding to requested samples in this function, which may take a non-trivial amount of time. I don't think it's unreasonable to expect applications to call IMFMediaSource methods from their interactive thread, in which case our blocking would reduce responsiveness.
This strikes me as premature optimization. But if you're really concerned, the best way to resolve this is in fact to measure the time this function takes.
The time it takes depends on multiple factors: whether it's a seek or a start; which demuxer, decoders, and elements are hooked up; and mainly how many ::RequestSample calls are in the queue at the time of the call. Plus, it's not the only reason to keep it asynchronous as documented: as mentioned earlier, it aligns better with other media foundation classes, and reduces code complexity.
> +
> +    return hr;
> }
>
> static HRESULT WINAPI media_source_Stop(IMFMediaSource *iface)
> @@ -772,6 +1073,9 @@ static HRESULT WINAPI media_source_Shutdown(IMFMediaSource *iface)
>      if (source->no_more_pads_event)
>          CloseHandle(source->no_more_pads_event);
>
> +    if (source->async_commands_queue)
> +        MFUnlockWorkQueue(source->async_commands_queue);
> +
>      return S_OK;
> }
>
> @@ -852,6 +1156,7 @@ static HRESULT media_source_constructor(IMFByteStream *bytestream, struct media_
>          return E_OUTOFMEMORY;
>
>      object->IMFMediaSource_iface.lpVtbl = &IMFMediaSource_vtbl;
> +    object->async_commands_callback.lpVtbl = &source_async_commands_callback_vtbl;
>      object->ref = 1;
>      object->byte_stream = bytestream;
>      IMFByteStream_AddRef(bytestream);
> @@ -860,6 +1165,9 @@ static HRESULT media_source_constructor(IMFByteStream *bytestream, struct media_
>      if (FAILED(hr = MFCreateEventQueue(&object->event_queue)))
>          goto fail;
>
> +    if (FAILED(hr = MFAllocateWorkQueue(&object->async_commands_queue)))
> +        goto fail;
> +
>      object->container = gst_bin_new(NULL);
>      object->bus = gst_bus_new();
>      gst_bus_set_sync_handler(object->bus, mf_src_bus_watch_wrapper, object, NULL);
> diff --git a/dlls/winegstreamer/mfplat.c b/dlls/winegstreamer/mfplat.c
> index 2e8b0978648..9aa17ad00ab 100644
> --- a/dlls/winegstreamer/mfplat.c
> +++ b/dlls/winegstreamer/mfplat.c
> @@ -601,3 +601,152 @@ IMFMediaType *mf_media_type_from_caps(const GstCaps *caps)
>      return media_type;
> }
> +
> +GstCaps *caps_from_mf_media_type(IMFMediaType *type)
> +{
> +    GUID major_type;
> +    GUID subtype;
> +    GstCaps *output = NULL;
> +
> +    if (FAILED(IMFMediaType_GetMajorType(type, &major_type)))
> +        return NULL;
> +    if (FAILED(IMFMediaType_GetGUID(type, &MF_MT_SUBTYPE, &subtype)))
> +        return NULL;
> +
> +    if (IsEqualGUID(&major_type, &MFMediaType_Video))
> +    {
> +        UINT64 frame_rate = 0, frame_size = 0;
> +        DWORD width, height, framerate_num, framerate_den;
> +        UINT32 unused;
> +
> +        if (FAILED(IMFMediaType_GetUINT64(type, &MF_MT_FRAME_SIZE, &frame_size)))
> +            return NULL;
> +        width = frame_size >> 32;
> +        height = frame_size;
> +        if (FAILED(IMFMediaType_GetUINT64(type, &MF_MT_FRAME_RATE, &frame_rate)))
> +        {
> +            frame_rate = TRUE;
> +            framerate_num = 0;
> +            framerate_den = 1;
> +        }
> +        else
> +        {
> +            framerate_num = frame_rate >> 32;
> +            framerate_den = frame_rate;
> +        }
> +
> +        /* Check if type is uncompressed */
> +        if (SUCCEEDED(MFCalculateImageSize(&subtype, 100, 100, &unused)))

Early return could save a level of indentation.
Yes, but in future patches there will be else statements for handling compressed types. Even if we don't end up using this for the media source, it will be necessary for games which manually use decoder MFTs.
I also feel like there's an easier way to do this check. Maybe something like:
for (i = 0; i < ARRAY_SIZE(uncompressed_video_formats); i++)
{
    ...
}

if (format == GST_VIDEO_FORMAT_UNKNOWN && !memcmp(&subtype.Data2, &MFVideoFormat_Base.Data2, ...))
    format = gst_video_format_from_fourcc(subtype.Data1);

if (format == GST_VIDEO_FORMAT_UNKNOWN)
{
    FIXME("Unrecognized subtype %s.\n", debugstr_guid(&subtype));
    return NULL;
}
That's certainly a possibility, but using MFCalculateImageSize is probably the best way to concisely determine whether we want to take the uncompressed video format route. With your solution, I guess we'd just add the handling for those formats in `if (format == GST_VIDEO_FORMAT_UNKNOWN)`. Either way is fine, which path should I take?
I don't see any reason we need to check whether it's uncompressed in general; both "an uncompressed format we can't handle" and "a compressed format" yield the same result.
Right now they do, but my point is that in future patches the structure is more like
if (uncompressed)
{
    ...
}
else if (format == h264)
{
    ...
}
else if (format == wmv)
    ...
Right, I don't see why there's any point doing that when you can instead just leave the uncompressed case as the default, e.g.
if (IsEqualGUID(&subtype, &some_compressed_format))
    ...
else if (IsEqualGUID(&subtype, &some_other_compressed_format))
    ...
else
{
    for (i = 0; i < ARRAY_SIZE(uncompressed_video_formats); ++i)
        ...

    if (format == GST_VIDEO_FORMAT_UNKNOWN)
        ...
    if (format == GST_VIDEO_FORMAT_UNKNOWN)
        return NULL;
}
Sounds good
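(Putting the agreed shape together — a sketch only. The uncompressed_video_formats table and its subtype/format members are assumed to mirror the existing mapping in mfplat.c, and the compressed branch is a placeholder for later patches:)

    GstVideoFormat format = GST_VIDEO_FORMAT_UNKNOWN;
    unsigned int i;

    if (IsEqualGUID(&subtype, &MFVideoFormat_H264))
    {
        /* compressed handling, added in a later patch */
    }
    else
    {
        for (i = 0; i < ARRAY_SIZE(uncompressed_video_formats); ++i)
        {
            if (IsEqualGUID(uncompressed_video_formats[i].subtype, &subtype))
            {
                format = uncompressed_video_formats[i].format;
                break;
            }
        }

        /* Fall back to treating FOURCC-based subtypes (MFVideoFormat_Base with
           the format's FOURCC in Data1) as raw video formats. */
        if (format == GST_VIDEO_FORMAT_UNKNOWN
                && !memcmp(&subtype.Data2, &MFVideoFormat_Base.Data2, sizeof(subtype) - sizeof(subtype.Data1)))
            format = gst_video_format_from_fourcc(subtype.Data1);

        if (format == GST_VIDEO_FORMAT_UNKNOWN)
        {
            FIXME("Unrecognized subtype %s.\n", debugstr_guid(&subtype));
            return NULL;
        }
    }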
> +            FIXME("Unrecognized subtype %s\n", debugstr_guid(&subtype));
> +            return NULL;
> +        }
> +
> +
> +        if (frame_size)
> +        {
> +            gst_caps_set_simple(output, "width", G_TYPE_INT, width, NULL);
> +            gst_caps_set_simple(output, "height", G_TYPE_INT, height, NULL);
> +        }
> +        if (frame_rate)

Why do you need to check this is nonzero? Can the frame rate really be 0/0?
To be honest, I don't remember exactly which circumstance led to this being necessary. I assume it was something with the IMFMediaType MK11 generates, but it does seem very strange so I'll have to check again.
For that matter, do we need to give GStreamer the framerate at all? It should only ever matter to sink elements (or those that respect a clock).
Why risk seeing what happens when you don't translate this cap; are there any benefits? Maybe a decoder would need it to handle some time-based seek?
In fact, translating the framerate in quartz has caused problems before, as applications didn't always provide it, or provided the wrong one, and thus the caps would not match; as a result we always remove it. On the other hand, I haven't seen any problems arise from clearing this field. In practice I don't think it should ever be necessary, because the parser provides the framerate.
My feeling is that such a workaround should be put in place where it causes problems, not in a general conversion function which may be used (as in later patches) for other purposes where the cap is necessary.
I don't think either translating the frame rate or not translating the frame rate is really incorrect, such that one can be called a workaround. And one is clearly more work than the other. [Note that gstdemux explicitly clears the framerate because it uses gst_video_info_to_caps() instead of building the caps structure manually.]
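(For reference, that looks roughly like the following; the exact field handling in gstdemux may differ, but 0/1 is GStreamer's "unknown framerate" value:)

    GstCaps *caps = gst_video_info_to_caps(&video_info);

    /* Applications don't reliably provide a framerate, so keep it out of caps
       negotiation by resetting the field gst_video_info_to_caps() filled in. */
    gst_caps_set_simple(caps, "framerate", GST_TYPE_FRACTION, 0, 1, NULL);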
In quartz, you have a static function which is only used within the context of your equivalent of the media source. This definition resides inside mfplat.c, where it will also be used by the decoder and, in the future, encoders (some games use Media Foundation's encoders to save highlights to disk, and streaming services use them for obvious reasons). I find it very likely that an encoder would want to know the framerate of an incoming stream.
Okay, I checked, and at least some decoders (dvdec) do demand to know the framerate, for some reason. [dvdec doesn't actually do anything with this information except pass it on to the downstream pad. Nor can I guess why a decoder would need to know the framerate in general].
For what it's worth, the chance of a media foundation application feeding some bogus framerate is slim, as they usually just search the media type handler for a desired type anyway, and we're supposed to fail on SetCurrentMediaType if the media type they enter is unsupported.

1: https://github.com/Guy1524/wine/blob/mfplat/dlls/winegstreamer/media_source....
Signed-off-by: Derek Lesho <dlesho@codeweavers.com>
---
v2: Addressed comments.
---
 dlls/mfplat/tests/mfplat.c        |  4 --
 dlls/winegstreamer/gst_private.h  |  1 +
 dlls/winegstreamer/media_source.c | 89 ++++++++++++++++++++++++++++++-
 dlls/winegstreamer/mfplat.c       | 69 ++++++++++++++++++++++++
 4 files changed, 158 insertions(+), 5 deletions(-)
diff --git a/dlls/mfplat/tests/mfplat.c b/dlls/mfplat/tests/mfplat.c
index bffce2bc114..c54113e5588 100644
--- a/dlls/mfplat/tests/mfplat.c
+++ b/dlls/mfplat/tests/mfplat.c
@@ -623,13 +623,10 @@ todo_wine
         hr = IMFMediaStream_RequestSample(video_stream, NULL);
         if (i == sample_count)
             break;
-todo_wine
         ok(hr == S_OK, "Failed to request sample %u, hr %#x.\n", i + 1, hr);
         if (hr != S_OK)
             break;
     }
-    if (FAILED(hr))
-        goto skip_source_tests;

     for (i = 0; i < sample_count; ++i)
     {
@@ -670,7 +667,6 @@ todo_wine

     get_event((IMFMediaEventGenerator *)mediasource, MEEndOfPresentation, NULL);

-skip_source_tests:
     IMFMediaStream_Release(video_stream);
     IMFMediaTypeHandler_Release(handler);
     IMFPresentationDescriptor_Release(descriptor);
diff --git a/dlls/winegstreamer/gst_private.h b/dlls/winegstreamer/gst_private.h
index 07556802a51..ff5aff42482 100644
--- a/dlls/winegstreamer/gst_private.h
+++ b/dlls/winegstreamer/gst_private.h
@@ -58,6 +58,7 @@ extern HRESULT mfplat_get_class_object(REFCLSID rclsid, REFIID riid, void **obj)
 HRESULT winegstreamer_stream_handler_create(REFIID riid, void **obj) DECLSPEC_HIDDEN;
 IMFMediaType *mf_media_type_from_caps(const GstCaps *caps) DECLSPEC_HIDDEN;
 GstCaps *caps_from_mf_media_type(IMFMediaType *type) DECLSPEC_HIDDEN;
+IMFSample *mf_sample_from_gst_buffer(GstBuffer *in) DECLSPEC_HIDDEN;

 HRESULT winegstreamer_stream_handler_create(REFIID riid, void **obj) DECLSPEC_HIDDEN;
diff --git a/dlls/winegstreamer/media_source.c b/dlls/winegstreamer/media_source.c
index 5eb4465fea6..1593a60dfe1 100644
--- a/dlls/winegstreamer/media_source.c
+++ b/dlls/winegstreamer/media_source.c
@@ -64,6 +64,7 @@ struct media_stream
 enum source_async_op
 {
     SOURCE_ASYNC_START,
+    SOURCE_ASYNC_REQUEST_SAMPLE,
 };

 struct source_async_command
@@ -79,6 +80,11 @@ struct source_async_command
             GUID format;
             PROPVARIANT position;
         } start;
+        struct
+        {
+            struct media_stream *stream;
+            IUnknown *token;
+        } request_sample;
     } u;
 };

@@ -341,6 +347,59 @@ static void start_pipeline(struct media_source *source, struct source_async_comm
     gst_element_get_state(source->container, NULL, NULL, -1);
 }

+static void dispatch_end_of_presentation(struct media_source *source)
+{
+    PROPVARIANT empty = {.vt = VT_EMPTY};
+    unsigned int i;
+
+    /* A stream has ended, check whether all have */
+    for (i = 0; i < source->stream_count; i++)
+    {
+        struct media_stream *stream = source->streams[i];
+
+        if (stream->state != STREAM_INACTIVE && !stream->eos)
+            return;
+    }
+
+    IMFMediaEventQueue_QueueEventParamVar(source->event_queue, MEEndOfPresentation, &GUID_NULL, S_OK, &empty);
+}
+
+static void wait_on_sample(struct media_stream *stream, IUnknown *token)
+{
+    PROPVARIANT empty_var = {.vt = VT_EMPTY};
+    GstSample *gst_sample;
+    GstBuffer *buffer;
+    IMFSample *sample;
+
+    TRACE("%p, %p\n", stream, token);
+
+    g_signal_emit_by_name(stream->appsink, "pull-sample", &gst_sample);
+    if (gst_sample)
+    {
+        buffer = gst_sample_get_buffer(gst_sample);
+
+        TRACE("PTS = %llu\n", (unsigned long long int) GST_BUFFER_PTS(buffer));
+
+        sample = mf_sample_from_gst_buffer(buffer);
+        gst_sample_unref(gst_sample);
+
+        if (token)
+            IMFSample_SetUnknown(sample, &MFSampleExtension_Token, token);
+
+        IMFMediaEventQueue_QueueEventParamUnk(stream->event_queue, MEMediaSample, &GUID_NULL, S_OK, (IUnknown *)sample);
+        IMFSample_Release(sample);
+    }
+
+    g_object_get(stream->appsink, "eos", &stream->eos, NULL);
+    if (stream->eos)
+    {
+        if (token)
+            IUnknown_Release(token);
+        IMFMediaEventQueue_QueueEventParamVar(stream->event_queue, MEEndOfStream, &GUID_NULL, S_OK, &empty_var);
+        dispatch_end_of_presentation(stream->parent_source);
+    }
+}
+
 static HRESULT WINAPI source_async_commands_Invoke(IMFAsyncCallback *iface, IMFAsyncResult *result)
 {
     struct media_source *source = impl_from_async_commands_callback_IMFAsyncCallback(iface);
@@ -360,6 +419,9 @@ static HRESULT WINAPI source_async_commands_Invoke(IMFAsyncCallback *iface, IMFA
         case SOURCE_ASYNC_START:
             start_pipeline(source, command);
             break;
+        case SOURCE_ASYNC_REQUEST_SAMPLE:
+            wait_on_sample(command->u.request_sample.stream, command->u.request_sample.token);
+            break;
     }
     IUnknown_Release(state);

@@ -647,13 +709,37 @@ static HRESULT WINAPI media_stream_GetStreamDescriptor(IMFMediaStream* iface, IM
 static HRESULT WINAPI media_stream_RequestSample(IMFMediaStream *iface, IUnknown *token)
 {
     struct media_stream *stream = impl_from_IMFMediaStream(iface);
+    struct source_async_command *command;
+    HRESULT hr;

     TRACE("(%p)->(%p)\n", iface, token);

     if (stream->state == STREAM_SHUTDOWN)
         return MF_E_SHUTDOWN;

-    return E_NOTIMPL;
+    if (stream->state == STREAM_INACTIVE)
+    {
+        WARN("Stream isn't active\n");
+        return MF_E_MEDIA_SOURCE_WRONGSTATE;
+    }
+
+    if (stream->eos)
+    {
+        return MF_E_END_OF_STREAM;
+    }
+
+    if (SUCCEEDED(hr = source_create_async_op(SOURCE_ASYNC_REQUEST_SAMPLE, &command)))
+    {
+        command->u.request_sample.stream = stream;
+        if (token)
+            IUnknown_AddRef(token);
+        command->u.request_sample.token = token;
+
+        /* Once pause support is added, this will need to be put into a stream queue, and synchronization will need to be added. */
+        hr = MFPutWorkItem(stream->parent_source->async_commands_queue, &stream->parent_source->async_commands_callback, &command->IUnknown_iface);
+    }
+
+    return hr;
 }
 static const IMFMediaStreamVtbl media_stream_vtbl =
@@ -738,6 +824,7 @@ static HRESULT new_media_stream(struct media_source *source, GstPad *pad, DWORD
     object->stream_id = stream_id;

     object->state = STREAM_INACTIVE;
+    object->eos = FALSE;
     if (FAILED(hr = MFCreateEventQueue(&object->event_queue)))
         goto fail;
diff --git a/dlls/winegstreamer/mfplat.c b/dlls/winegstreamer/mfplat.c
index 9aa17ad00ab..f28ef8f117c 100644
--- a/dlls/winegstreamer/mfplat.c
+++ b/dlls/winegstreamer/mfplat.c
@@ -750,3 +750,72 @@ GstCaps *caps_from_mf_media_type(IMFMediaType *type)
     FIXME("Unrecognized major type %s\n", debugstr_guid(&major_type));
     return NULL;
 }
+
+/* IMFSample = GstBuffer
+   IMFBuffer = GstMemory */
+
+/* TODO: Future optimization could be to create a custom
+   IMFMediaBuffer wrapper around GstMemory, and to utilize
+   gst_memory_new_wrapped on IMFMediaBuffer data. However,
+   this wouldn't work if we allow the callers to allocate
+   the buffers. */
+
+IMFSample *mf_sample_from_gst_buffer(GstBuffer *gst_buffer)
+{
+    IMFMediaBuffer *mf_buffer = NULL;
+    GstMapInfo map_info = {0};
+    LONGLONG duration, time;
+    BYTE *mapped_buf = NULL;
+    IMFSample *out = NULL;
+    HRESULT hr;
+
+    if (FAILED(hr = MFCreateSample(&out)))
+        goto done;
+
+    duration = GST_BUFFER_DURATION(gst_buffer);
+    time = GST_BUFFER_PTS(gst_buffer);
+
+    if (FAILED(hr = IMFSample_SetSampleDuration(out, duration / 100)))
+        goto done;
+
+    if (FAILED(hr = IMFSample_SetSampleTime(out, time / 100)))
+        goto done;
+
+    if (!gst_buffer_map(gst_buffer, &map_info, GST_MAP_READ))
+    {
+        hr = E_FAIL;
+        goto done;
+    }
+
+    if (FAILED(hr = MFCreateMemoryBuffer(map_info.maxsize, &mf_buffer)))
+        goto done;
+
+    if (FAILED(hr = IMFMediaBuffer_Lock(mf_buffer, &mapped_buf, NULL, NULL)))
+        goto done;
+
+    memcpy(mapped_buf, map_info.data, map_info.size);
+
+    if (FAILED(hr = IMFMediaBuffer_Unlock(mf_buffer)))
+        goto done;
+
+    if (FAILED(hr = IMFMediaBuffer_SetCurrentLength(mf_buffer, map_info.size)))
+        goto done;
+
+    if (FAILED(hr = IMFSample_AddBuffer(out, mf_buffer)))
+        goto done;
+
+done:
+    if (mf_buffer)
+        IMFMediaBuffer_Release(mf_buffer);
+    if (map_info.data)
+        gst_buffer_unmap(gst_buffer, &map_info);
+    if (FAILED(hr))
+    {
+        ERR("Failed to copy GstBuffer to IMFSample, hr = %#x\n", hr);
+        if (out)
+            IMFSample_Release(out);
+        out = NULL;
+    }
+
+    return out;
+}
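(For reference, the zero-copy direction that TODO hints at could look roughly like this for the reverse, IMFMediaBuffer-to-GstBuffer case; mf_buffer_unlock_and_release is a hypothetical GDestroyNotify callback that would unlock and release the media buffer:)

    BYTE *data;
    DWORD max_length, current_length;

    /* Wrap the locked IMFMediaBuffer data in a GstBuffer instead of copying. */
    IMFMediaBuffer_Lock(mf_buffer, &data, &max_length, &current_length);
    IMFMediaBuffer_AddRef(mf_buffer);
    gst_buffer = gst_buffer_new_wrapped_full(GST_MEMORY_FLAG_READONLY, data, max_length,
            0, current_length, mf_buffer, mf_buffer_unlock_and_release);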
Hi,
While running your changed tests, I think I found new failures. Being a bot and all I'm not very good at pattern recognition, so I might be wrong, but could you please double-check?
Full results can be found at: https://testbot.winehq.org/JobDetails.pl?Key=79964
Your paranoid android.
=== debiant (32 bit Japanese:Japan report) ===
mfplat:
mfplat.c:2694: Test failed: Unexpected counter value 0.
mfplat.c:2771: Test failed: Unexpected return value 0x102.
Unhandled exception: page fault on execute access to 0x0a2e7823 in 32-bit code (0x0a2e7823).
Report validation errors: mfplat:mfplat crashed (c0000005)
On 10/6/20 10:59 AM, Derek Lesho wrote:
> Signed-off-by: Derek Lesho <dlesho@codeweavers.com>
> [...]
> @@ -738,6 +824,7 @@ static HRESULT new_media_stream(struct media_source *source, GstPad *pad, DWORD
>      object->stream_id = stream_id;
>
>      object->state = STREAM_INACTIVE;
> +    object->eos = FALSE;
This doesn't look like it belongs in this patch.
On 10/6/20 10:02 PM, Zebediah Figura wrote:
> On 10/6/20 10:59 AM, Derek Lesho wrote:
>> @@ -738,6 +824,7 @@ static HRESULT new_media_stream(struct media_source *source, GstPad *pad, DWORD
>>      object->stream_id = stream_id;
>>
>>      object->state = STREAM_INACTIVE;
>> +    object->eos = FALSE;
>
> This doesn't look like it belongs in this patch.
Actually, it looks like we don't use the EOS state until this patch, so I should move all of that to this patch from patch 4.
On 10/6/20 10:59 AM, Derek Lesho wrote:
> Signed-off-by: Derek Lesho <dlesho@codeweavers.com>
> ---
> v2:
> - Use fixed list of supported formats in the videoconvert path instead of querying the template.
> - Moved code setting "caps" appsink property to patch 4, where it begins to be used.
> - Addressed some comments.
> ---
>  dlls/winegstreamer/media_source.c | 124 ++++++++++++++++++++++++++----
>  1 file changed, 110 insertions(+), 14 deletions(-)
>
> diff --git a/dlls/winegstreamer/media_source.c b/dlls/winegstreamer/media_source.c
> index 5f3c43a0204..cbee412030d 100644
> --- a/dlls/winegstreamer/media_source.c
> +++ b/dlls/winegstreamer/media_source.c
> @@ -384,6 +384,43 @@ static const IMFMediaStreamVtbl media_stream_vtbl =
>      media_stream_RequestSample
>  };
> +/* Setup a chain of elements which should hopefully allow transformations to any IMFMediaType
> +   the user throws at us through gstreamer's caps negotiation. */
> +static HRESULT media_stream_connect_to_sink(struct media_stream *stream)
> +{
> +    GstCaps *source_caps = gst_pad_query_caps(stream->their_src, NULL);
> +    const gchar *stream_type;
> +
> +    if (!source_caps)
> +        return E_FAIL;
> +
> +    stream_type = gst_structure_get_name(gst_caps_get_structure(source_caps, 0));
> +    gst_caps_unref(source_caps);
> +
> +    if (!strcmp(stream_type, "video/x-raw"))
> +    {
> +        GstElement *videoconvert = gst_element_factory_make("videoconvert", NULL);
> +
> +        gst_bin_add(GST_BIN(stream->parent_source->container), videoconvert);
> +
> +        stream->my_sink = gst_element_get_static_pad(videoconvert, "sink");
> +
> +        if (!gst_element_link(videoconvert, stream->appsink))
> +            return E_FAIL;
> +
> +        gst_element_sync_state_with_parent(videoconvert);
> +    }
> +    else
> +    {
> +        stream->my_sink = gst_element_get_static_pad(stream->appsink, "sink");
> +    }
> +
> +    if (gst_pad_link(stream->their_src, stream->my_sink) != GST_PAD_LINK_OK)
> +        return E_FAIL;
> +
> +    return S_OK;
> +}
> +
>  static HRESULT new_media_stream(struct media_source *source, GstPad *pad, DWORD stream_id, struct media_stream **out_stream)
>  {
>      struct media_stream *object = heap_alloc_zero(sizeof(*object));
> @@ -414,8 +451,8 @@ static HRESULT new_media_stream(struct media_source *source, GstPad *pad, DWORD
>      g_object_set(object->appsink, "sync", FALSE, NULL);
>      g_object_set(object->appsink, "max-buffers", 5, NULL);
>
> -    object->my_sink = gst_element_get_static_pad(object->appsink, "sink");
> -    gst_pad_link(object->their_src, object->my_sink);
> +    if (FAILED(hr = media_stream_connect_to_sink(object)))
> +        goto fail;
>
>      gst_element_sync_state_with_parent(object->appsink);
> @@ -435,28 +472,87 @@ static HRESULT media_stream_init_desc(struct media_stream *stream)
>  {
>      GstCaps *current_caps = gst_pad_get_current_caps(stream->their_src);
>      IMFMediaTypeHandler *type_handler;
> +    IMFMediaType **stream_types = NULL;
>      IMFMediaType *stream_type = NULL;
> +    DWORD type_count = 0;
> +    const gchar *major_type;
> +    unsigned int i;
>      HRESULT hr;
>
> -    stream_type = mf_media_type_from_caps(current_caps);
> -    gst_caps_unref(current_caps);
> -    if (!stream_type)
> -        return E_FAIL;
> +    major_type = gst_structure_get_name(gst_caps_get_structure(current_caps, 0));
> +
> +    if (!strcmp(major_type, "video/x-raw"))
> +    {
> +        /* These are the most common native output types of decoders: https://docs.microsoft.com/en-us/windows/win32/medfound/mft-decoder-expose-output-types-in-native-order */
This line is very long.
> +        static const GUID * video_types[] =
"static const GUID *const video_types[]"
> +        {
> +            &MFVideoFormat_NV12,
> +            &MFVideoFormat_YV12,
> +            &MFVideoFormat_YUY2,
> +            &MFVideoFormat_IYUV,
> +            &MFVideoFormat_I420,
> +        };
>
> -    hr = MFCreateStreamDescriptor(stream->stream_id, 1, &stream_type, &stream->descriptor);
> +        IMFMediaType *base_type = mf_media_type_from_caps(current_caps);
> +        GUID base_subtype;
>
> -    IMFMediaType_Release(stream_type);
> +        IMFMediaType_GetGUID(base_type, &MF_MT_SUBTYPE, &base_subtype);
>
> -    if (FAILED(hr))
> -        return hr;
> +        stream_types = heap_alloc(sizeof(IMFMediaType *) * (ARRAY_SIZE(video_types) + 1));
>
> -    if (FAILED(hr = IMFStreamDescriptor_GetMediaTypeHandler(stream->descriptor, &type_handler)))
> -        return hr;
> +        stream_types[0] = base_type;
> +        type_count = 1;
>
> -    hr = IMFMediaTypeHandler_SetCurrentMediaType(type_handler, stream_type);
> +        for (i = 0; i < ARRAY_SIZE(video_types); i++)
> +        {
> +            IMFMediaType *new_type;
>
> -    IMFMediaTypeHandler_Release(type_handler);
> +            if (IsEqualGUID(&base_subtype, video_types[i]))
> +                continue;
> +
> +            if (FAILED(hr = MFCreateMediaType(&new_type)))
> +                goto done;
> +            stream_types[type_count++] = new_type;
> +
> +            if (FAILED(hr = IMFMediaType_CopyAllItems(base_type, (IMFAttributes *) new_type)))
> +                goto done;
> +            if (FAILED(hr = IMFMediaType_SetGUID(new_type, &MF_MT_SUBTYPE, video_types[i])))
> +                goto done;
> +        }
> +    }
> +    else
> +    {
> +        stream_type = mf_media_type_from_caps(current_caps);
> +        if (stream_type)
> +        {
> +            stream_types = &stream_type;
> +            type_count = 1;
> +        }
> +    }
> +
> +    if (!type_count)
> +    {
> +        ERR("Failed to establish an IMFMediaType from any of the possible stream caps!\n");
> +        return E_FAIL;
> +    }
> +
> +    if (FAILED(hr = MFCreateStreamDescriptor(stream->stream_id, type_count, stream_types, &stream->descriptor)))
> +        goto done;
> +
> +    if (FAILED(hr = IMFStreamDescriptor_GetMediaTypeHandler(stream->descriptor, &type_handler)))
> +        goto done;
> +
> +    if (FAILED(hr = IMFMediaTypeHandler_SetCurrentMediaType(type_handler, stream_types[0])))
> +        goto done;
> +
> +done:
> +    gst_caps_unref(current_caps);
> +    if (type_handler)
> +        IMFMediaTypeHandler_Release(type_handler);
> +    for (i = 0; i < type_count; i++)
> +        IMFMediaType_Release(stream_types[i]);
> +    if (stream_types != &stream_type)
> +        heap_free(stream_types);
> +
> +    return hr;
>  }