-- v5:
winegstreamer: Respect video format padding for input buffers too.
winegstreamer: Normalize video processor and color converter apertures.
winegstreamer: Normalize both input and output media types stride at once.
winegstreamer: Use video info stride in buffer meta rather than videoflip.
winegstreamer: Keep the input caps on the transform.
winegstreamer: Use a new wg_video_buffer_pool class to add buffer meta.
winegstreamer: Only use pool and set buffer meta for raw video frames.
From: Rémi Bernon <rbernon@codeweavers.com>
--- dlls/winegstreamer/wg_transform.c | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-)
diff --git a/dlls/winegstreamer/wg_transform.c b/dlls/winegstreamer/wg_transform.c index 614125522a8..018131f4f97 100644 --- a/dlls/winegstreamer/wg_transform.c +++ b/dlls/winegstreamer/wg_transform.c @@ -131,6 +131,7 @@ static gboolean transform_sink_query_allocation(struct wg_transform *transform, gsize plane_align = transform->attrs.output_plane_align; GstStructure *config, *params; GstVideoAlignment align; + const char *mime_type; gboolean needs_pool; GstBufferPool *pool; GstVideoInfo info; @@ -139,7 +140,9 @@ static gboolean transform_sink_query_allocation(struct wg_transform *transform, GST_LOG("transform %p, %"GST_PTR_FORMAT, transform, query);
gst_query_parse_allocation(query, &caps, &needs_pool); - if (stream_type_from_caps(caps) != GST_STREAM_TYPE_VIDEO || !needs_pool) + + mime_type = gst_structure_get_name(gst_caps_get_structure(caps, 0)); + if (strcmp(mime_type, "video/x-raw") || !needs_pool) return false;
if (!gst_video_info_from_caps(&info, caps) @@ -912,6 +915,7 @@ NTSTATUS wg_transform_read_data(void *args) struct wg_sample *sample = params->sample; GstVideoAlignment align = {0}; GstBuffer *output_buffer; + const char *output_mime; GstCaps *output_caps; bool discard_data; NTSTATUS status; @@ -927,8 +931,9 @@ NTSTATUS wg_transform_read_data(void *args)
output_buffer = gst_sample_get_buffer(transform->output_sample); output_caps = gst_sample_get_caps(transform->output_sample); + output_mime = gst_structure_get_name(gst_caps_get_structure(output_caps, 0));
- if (stream_type_from_caps(output_caps) == GST_STREAM_TYPE_VIDEO) + if (!strcmp(output_mime, "video/x-raw")) { gsize plane_align = transform->attrs.output_plane_align;
@@ -949,7 +954,7 @@ NTSTATUS wg_transform_read_data(void *args) return STATUS_SUCCESS; }
- if (stream_type_from_caps(output_caps) == GST_STREAM_TYPE_VIDEO) + if (!strcmp(output_mime, "video/x-raw")) status = read_transform_output_video(sample, output_buffer, &src_video_info, &dst_video_info); else
From: Rémi Bernon <rbernon@codeweavers.com>
--- dlls/winegstreamer/wg_transform.c | 84 +++++++++++++++++++++---------- 1 file changed, 58 insertions(+), 26 deletions(-)
diff --git a/dlls/winegstreamer/wg_transform.c b/dlls/winegstreamer/wg_transform.c index 018131f4f97..a6f12d41080 100644 --- a/dlls/winegstreamer/wg_transform.c +++ b/dlls/winegstreamer/wg_transform.c @@ -82,6 +82,57 @@ static void align_video_info_planes(gsize plane_align, GstVideoInfo *info, GstVi gst_video_info_align(info, align); }
+typedef struct +{ + GstVideoBufferPool parent; + GstVideoInfo info; +} WgVideoBufferPool; + +typedef struct +{ + GstVideoBufferPoolClass parent_class; +} WgVideoBufferPoolClass; + +G_DEFINE_TYPE(WgVideoBufferPool, wg_video_buffer_pool, GST_TYPE_VIDEO_BUFFER_POOL); + +static void wg_video_buffer_pool_init(WgVideoBufferPool *pool) +{ +} + +static void wg_video_buffer_pool_class_init(WgVideoBufferPoolClass *klass) +{ +} + +static WgVideoBufferPool *wg_video_buffer_pool_create(GstCaps *caps, gsize plane_align, + GstAllocator *allocator, GstVideoAlignment *align) +{ + WgVideoBufferPool *pool; + GstStructure *config; + + if (!(pool = g_object_new(wg_video_buffer_pool_get_type(), NULL))) + return NULL; + + gst_video_info_from_caps(&pool->info, caps); + align_video_info_planes(plane_align, &pool->info, align); + + if (!(config = gst_buffer_pool_get_config(GST_BUFFER_POOL(pool)))) + GST_ERROR("Failed to get %"GST_PTR_FORMAT" config.", pool); + else + { + gst_buffer_pool_config_add_option(config, GST_BUFFER_POOL_OPTION_VIDEO_META); + gst_buffer_pool_config_add_option(config, GST_BUFFER_POOL_OPTION_VIDEO_ALIGNMENT); + gst_buffer_pool_config_set_video_alignment(config, align); + + gst_buffer_pool_config_set_params(config, caps, pool->info.size, 0, 0); + gst_buffer_pool_config_set_allocator(config, allocator, NULL); + if (!gst_buffer_pool_set_config(GST_BUFFER_POOL(pool), config)) + GST_ERROR("Failed to set %"GST_PTR_FORMAT" config.", pool); + } + + GST_INFO("Created %"GST_PTR_FORMAT, pool); + return pool; +} + static GstFlowReturn transform_sink_chain_cb(GstPad *pad, GstObject *parent, GstBuffer *buffer) { struct wg_transform *transform = gst_pad_get_element_private(pad); @@ -128,13 +179,11 @@ static gboolean transform_src_query_cb(GstPad *pad, GstObject *parent, GstQuery
static gboolean transform_sink_query_allocation(struct wg_transform *transform, GstQuery *query) { - gsize plane_align = transform->attrs.output_plane_align; - GstStructure *config, *params; + WgVideoBufferPool *pool; GstVideoAlignment align; const char *mime_type; + GstStructure *params; gboolean needs_pool; - GstBufferPool *pool; - GstVideoInfo info; GstCaps *caps;
GST_LOG("transform %p, %"GST_PTR_FORMAT, transform, query); @@ -145,12 +194,10 @@ static gboolean transform_sink_query_allocation(struct wg_transform *transform, if (strcmp(mime_type, "video/x-raw") || !needs_pool) return false;
- if (!gst_video_info_from_caps(&info, caps) - || !(pool = gst_video_buffer_pool_new())) + if (!(pool = wg_video_buffer_pool_create(caps, transform->attrs.output_plane_align, + transform->allocator, &align))) return false;
- align_video_info_planes(plane_align, &info, &align); - if ((params = gst_structure_new("video-meta", "padding-top", G_TYPE_UINT, align.padding_top, "padding-bottom", G_TYPE_UINT, align.padding_bottom, @@ -162,30 +209,15 @@ static gboolean transform_sink_query_allocation(struct wg_transform *transform, gst_structure_free(params); }
- if (!(config = gst_buffer_pool_get_config(pool))) - GST_ERROR("Failed to get %"GST_PTR_FORMAT" config.", pool); - else - { - gst_buffer_pool_config_add_option(config, GST_BUFFER_POOL_OPTION_VIDEO_META); - gst_buffer_pool_config_add_option(config, GST_BUFFER_POOL_OPTION_VIDEO_ALIGNMENT); - gst_buffer_pool_config_set_video_alignment(config, &align); - - gst_buffer_pool_config_set_params(config, caps, - info.size, 0, 0); - gst_buffer_pool_config_set_allocator(config, transform->allocator, NULL); - if (!gst_buffer_pool_set_config(pool, config)) - GST_ERROR("Failed to set %"GST_PTR_FORMAT" config.", pool); - } - /* Prevent pool reconfiguration, we don't want another alignment. */ - if (!gst_buffer_pool_set_active(pool, true)) + if (!gst_buffer_pool_set_active(GST_BUFFER_POOL(pool), true)) GST_ERROR("%"GST_PTR_FORMAT" failed to activate.", pool);
- gst_query_add_allocation_pool(query, pool, info.size, 0, 0); + gst_query_add_allocation_pool(query, GST_BUFFER_POOL(pool), pool->info.size, 0, 0); gst_query_add_allocation_param(query, transform->allocator, NULL);
GST_INFO("Proposing %"GST_PTR_FORMAT", buffer size %#zx, %"GST_PTR_FORMAT", for %"GST_PTR_FORMAT, - pool, info.size, transform->allocator, query); + pool, pool->info.size, transform->allocator, query);
g_object_unref(pool); return true;
From: Rémi Bernon <rbernon@codeweavers.com>
--- dlls/winegstreamer/wg_transform.c | 39 ++++++++++++++++--------------- 1 file changed, 20 insertions(+), 19 deletions(-)
diff --git a/dlls/winegstreamer/wg_transform.c b/dlls/winegstreamer/wg_transform.c index a6f12d41080..ac036fcce59 100644 --- a/dlls/winegstreamer/wg_transform.c +++ b/dlls/winegstreamer/wg_transform.c @@ -61,6 +61,7 @@ struct wg_transform bool output_caps_changed; GstCaps *desired_caps; GstCaps *output_caps; + GstCaps *input_caps; };
static struct wg_transform *get_transform(wg_transform_t trans) @@ -365,6 +366,7 @@ NTSTATUS wg_transform_destroy(void *args) gst_query_unref(transform->drain_query); gst_caps_unref(transform->desired_caps); gst_caps_unref(transform->output_caps); + gst_caps_unref(transform->input_caps); gst_atomic_queue_unref(transform->output_queue); free(transform);
@@ -405,7 +407,7 @@ NTSTATUS wg_transform_create(void *args) { struct wg_transform_create_params *params = args; GstElement *first = NULL, *last = NULL, *element; - GstCaps *sink_caps = NULL, *src_caps = NULL, *parsed_caps = NULL; + GstCaps *sink_caps = NULL, *parsed_caps = NULL; NTSTATUS status = STATUS_UNSUCCESSFUL; const gchar *input_mime, *output_mime; GstPadTemplate *template = NULL; @@ -427,27 +429,31 @@ NTSTATUS wg_transform_create(void *args) goto out; transform->attrs = params->attrs;
+ if (!(transform->input_caps = caps_from_media_type(¶ms->input_type))) + goto out; + GST_INFO("transform %p input caps %"GST_PTR_FORMAT, transform, transform->input_caps); + input_mime = gst_structure_get_name(gst_caps_get_structure(transform->input_caps, 0)); + + if (!(transform->output_caps = caps_from_media_type(¶ms->output_type))) + goto out; + GST_INFO("transform %p output caps %"GST_PTR_FORMAT, transform, transform->output_caps); + output_mime = gst_structure_get_name(gst_caps_get_structure(transform->output_caps, 0)); + if (IsEqualGUID(¶ms->input_type.major, &MFMediaType_Video)) transform->input_info = params->input_type.u.video->videoInfo; if (IsEqualGUID(¶ms->output_type.major, &MFMediaType_Video)) output_info = params->output_type.u.video->videoInfo;
- if (!(src_caps = caps_from_media_type(¶ms->input_type))) - goto out; - if (!(template = gst_pad_template_new("src", GST_PAD_SRC, GST_PAD_ALWAYS, src_caps))) + if (!(template = gst_pad_template_new("src", GST_PAD_SRC, GST_PAD_ALWAYS, transform->input_caps))) goto out; transform->my_src = gst_pad_new_from_template(template, "src"); g_object_unref(template); if (!transform->my_src) goto out;
- GST_INFO("transform %p input caps %"GST_PTR_FORMAT, transform, src_caps); - gst_pad_set_element_private(transform->my_src, transform); gst_pad_set_query_function(transform->my_src, transform_src_query_cb);
- if (!(transform->output_caps = caps_from_media_type(¶ms->output_type))) - goto out; transform->desired_caps = gst_caps_ref(transform->output_caps); if (!(template = gst_pad_template_new("sink", GST_PAD_SINK, GST_PAD_ALWAYS, transform->output_caps))) goto out; @@ -456,34 +462,30 @@ NTSTATUS wg_transform_create(void *args) if (!transform->my_sink) goto out;
- GST_INFO("transform %p output caps %"GST_PTR_FORMAT, transform, transform->output_caps); - gst_pad_set_element_private(transform->my_sink, transform); gst_pad_set_event_function(transform->my_sink, transform_sink_event_cb); gst_pad_set_query_function(transform->my_sink, transform_sink_query_cb); gst_pad_set_chain_function(transform->my_sink, transform_sink_chain_cb);
- input_mime = gst_structure_get_name(gst_caps_get_structure(src_caps, 0)); - if (!(parsed_caps = transform_get_parsed_caps(src_caps, input_mime))) + if (!(parsed_caps = transform_get_parsed_caps(transform->input_caps, input_mime))) goto out;
/* Since we append conversion elements, we don't want to filter decoders * based on the actual output caps now. Matching decoders with the * raw output media type should be enough. */ - output_mime = gst_structure_get_name(gst_caps_get_structure(transform->output_caps, 0)); if (!(sink_caps = gst_caps_new_empty_simple(output_mime))) goto out;
if (strcmp(input_mime, "audio/x-raw") && strcmp(input_mime, "video/x-raw")) { - if ((element = find_element(GST_ELEMENT_FACTORY_TYPE_PARSER, src_caps, parsed_caps)) + if ((element = find_element(GST_ELEMENT_FACTORY_TYPE_PARSER, transform->input_caps, parsed_caps)) && !append_element(transform->container, element, &first, &last)) goto out; else if (!element) { gst_caps_unref(parsed_caps); - parsed_caps = gst_caps_ref(src_caps); + parsed_caps = gst_caps_ref(transform->input_caps); }
if (!(element = find_element(GST_ELEMENT_FACTORY_TYPE_DECODER, parsed_caps, sink_caps)) @@ -560,7 +562,7 @@ NTSTATUS wg_transform_create(void *args) if (!(event = gst_event_new_stream_start("stream")) || !push_event(transform->my_src, event)) goto out; - if (!(event = gst_event_new_caps(src_caps)) + if (!(event = gst_event_new_caps(transform->input_caps)) || !push_event(transform->my_src, event)) goto out;
@@ -574,7 +576,6 @@ NTSTATUS wg_transform_create(void *args) goto out;
gst_caps_unref(parsed_caps); - gst_caps_unref(src_caps); gst_caps_unref(sink_caps);
GST_INFO("Created winegstreamer transform %p.", transform); @@ -590,8 +591,8 @@ out: gst_caps_unref(transform->output_caps); if (transform->my_src) gst_object_unref(transform->my_src); - if (src_caps) - gst_caps_unref(src_caps); + if (transform->input_caps) + gst_caps_unref(transform->input_caps); if (parsed_caps) gst_caps_unref(parsed_caps); if (sink_caps)
From: Rémi Bernon <rbernon@codeweavers.com>
--- dlls/winegstreamer/wg_transform.c | 108 +++++++++++++++++++++--------- 1 file changed, 76 insertions(+), 32 deletions(-)
diff --git a/dlls/winegstreamer/wg_transform.c b/dlls/winegstreamer/wg_transform.c index ac036fcce59..2537331a118 100644 --- a/dlls/winegstreamer/wg_transform.c +++ b/dlls/winegstreamer/wg_transform.c @@ -53,8 +53,8 @@ struct wg_transform GstQuery *drain_query;
GstAtomicQueue *input_queue; - GstElement *video_flip; MFVideoInfo input_info; + MFVideoInfo output_info;
GstAtomicQueue *output_queue; GstSample *output_sample; @@ -69,7 +69,8 @@ static struct wg_transform *get_transform(wg_transform_t trans) return (struct wg_transform *)(ULONG_PTR)trans; }
-static void align_video_info_planes(gsize plane_align, GstVideoInfo *info, GstVideoAlignment *align) +static void align_video_info_planes(MFVideoInfo *video_info, gsize plane_align, + GstVideoInfo *info, GstVideoAlignment *align) { gst_video_alignment_reset(align);
@@ -81,6 +82,15 @@ static void align_video_info_planes(gsize plane_align, GstVideoInfo *info, GstVi align->stride_align[3] = plane_align;
gst_video_info_align(info, align); + + if (video_info->VideoFlags & MFVideoFlag_BottomUpLinearRep) + { + for (guint i = 0; i < ARRAY_SIZE(info->offset); ++i) + { + info->offset[i] += (info->height - 1) * info->stride[i]; + info->stride[i] = -info->stride[i]; + } + } }
typedef struct @@ -96,16 +106,53 @@ typedef struct
G_DEFINE_TYPE(WgVideoBufferPool, wg_video_buffer_pool, GST_TYPE_VIDEO_BUFFER_POOL);
+static void buffer_add_video_meta(GstBuffer *buffer, GstVideoInfo *info) +{ + GstVideoMeta *meta; + + if (!(meta = gst_buffer_get_video_meta(buffer))) + meta = gst_buffer_add_video_meta(buffer, GST_VIDEO_FRAME_FLAG_NONE, + info->finfo->format, info->width, info->height); + + if (!meta) + GST_ERROR("Failed to add video meta to buffer %"GST_PTR_FORMAT, buffer); + else + { + memcpy(meta->offset, info->offset, sizeof(info->offset)); + memcpy(meta->stride, info->stride, sizeof(info->stride)); + } +} + +static GstFlowReturn wg_video_buffer_pool_alloc_buffer(GstBufferPool *gst_pool, GstBuffer **buffer, + GstBufferPoolAcquireParams *params) +{ + GstBufferPoolClass *parent_class = GST_BUFFER_POOL_CLASS(wg_video_buffer_pool_parent_class); + WgVideoBufferPool *pool = (WgVideoBufferPool *)gst_pool; + GstFlowReturn ret; + + GST_LOG("%"GST_PTR_FORMAT", buffer %p, params %p", pool, buffer, params); + + if (!(ret = parent_class->alloc_buffer(gst_pool, buffer, params))) + { + buffer_add_video_meta(*buffer, &pool->info); + GST_INFO("%"GST_PTR_FORMAT" allocated buffer %"GST_PTR_FORMAT, pool, *buffer); + } + + return ret; +} + static void wg_video_buffer_pool_init(WgVideoBufferPool *pool) { }
static void wg_video_buffer_pool_class_init(WgVideoBufferPoolClass *klass) { + GstBufferPoolClass *pool_class = GST_BUFFER_POOL_CLASS(klass); + pool_class->alloc_buffer = wg_video_buffer_pool_alloc_buffer; }
static WgVideoBufferPool *wg_video_buffer_pool_create(GstCaps *caps, gsize plane_align, - GstAllocator *allocator, GstVideoAlignment *align) + GstAllocator *allocator, MFVideoInfo *video_info, GstVideoAlignment *align) { WgVideoBufferPool *pool; GstStructure *config; @@ -114,7 +161,7 @@ static WgVideoBufferPool *wg_video_buffer_pool_create(GstCaps *caps, gsize plane return NULL;
gst_video_info_from_caps(&pool->info, caps); - align_video_info_planes(plane_align, &pool->info, align); + align_video_info_planes(video_info, plane_align, &pool->info, align);
if (!(config = gst_buffer_pool_get_config(GST_BUFFER_POOL(pool)))) GST_ERROR("Failed to get %"GST_PTR_FORMAT" config.", pool); @@ -196,7 +243,7 @@ static gboolean transform_sink_query_allocation(struct wg_transform *transform, return false;
if (!(pool = wg_video_buffer_pool_create(caps, transform->attrs.output_plane_align, - transform->allocator, &align))) + transform->allocator, &transform->output_info, &align))) return false;
if ((params = gst_structure_new("video-meta", @@ -412,7 +459,6 @@ NTSTATUS wg_transform_create(void *args) const gchar *input_mime, *output_mime; GstPadTemplate *template = NULL; struct wg_transform *transform; - MFVideoInfo output_info = {0}; GstEvent *event;
if (!(transform = calloc(1, sizeof(*transform)))) @@ -442,7 +488,7 @@ NTSTATUS wg_transform_create(void *args) if (IsEqualGUID(¶ms->input_type.major, &MFMediaType_Video)) transform->input_info = params->input_type.u.video->videoInfo; if (IsEqualGUID(¶ms->output_type.major, &MFMediaType_Video)) - output_info = params->output_type.u.video->videoInfo; + transform->output_info = params->output_type.u.video->videoInfo;
if (!(template = gst_pad_template_new("src", GST_PAD_SRC, GST_PAD_ALWAYS, transform->input_caps))) goto out; @@ -529,15 +575,6 @@ NTSTATUS wg_transform_create(void *args) } else { - if (!(element = create_element("videoconvert", "base")) - || !append_element(transform->container, element, &first, &last)) - goto out; - if (!(transform->video_flip = create_element("videoflip", "base")) - || !append_element(transform->container, transform->video_flip, &first, &last)) - goto out; - - if ((transform->input_info.VideoFlags ^ output_info.VideoFlags) & MFVideoFlag_BottomUpLinearRep) - gst_util_set_object_arg(G_OBJECT(transform->video_flip), "method", "vertical-flip"); if (!(element = create_element("videoconvert", "base")) || !append_element(transform->container, element, &first, &last)) goto out; @@ -635,19 +672,18 @@ NTSTATUS wg_transform_set_output_type(void *args) { struct wg_transform_set_output_type_params *params = args; struct wg_transform *transform = get_transform(params->transform); - MFVideoInfo output_info = {0}; GstCaps *caps, *stripped; GstSample *sample;
- if (IsEqualGUID(¶ms->media_type.major, &MFMediaType_Video)) - output_info = params->media_type.u.video->videoInfo; - if (!(caps = caps_from_media_type(¶ms->media_type))) { GST_ERROR("Failed to convert media type to caps."); return STATUS_UNSUCCESSFUL; }
+ if (IsEqualGUID(¶ms->media_type.major, &MFMediaType_Video)) + transform->output_info = params->media_type.u.video->videoInfo; + GST_INFO("transform %p output caps %"GST_PTR_FORMAT, transform, caps);
stripped = caps_strip_fields(caps, transform->attrs.allow_format_change); @@ -668,16 +704,6 @@ NTSTATUS wg_transform_set_output_type(void *args) gst_caps_unref(transform->desired_caps); transform->desired_caps = caps;
- if (transform->video_flip) - { - const char *value; - - if ((transform->input_info.VideoFlags ^ output_info.VideoFlags) & MFVideoFlag_BottomUpLinearRep) - value = "vertical-flip"; - else - value = "none"; - gst_util_set_object_arg(G_OBJECT(transform->video_flip), "method", value); - } if (!push_event(transform->my_sink, gst_event_new_reconfigure())) { GST_ERROR("Failed to reconfigure transform %p.", transform); @@ -711,6 +737,8 @@ NTSTATUS wg_transform_push_data(void *args) struct wg_transform_push_data_params *params = args; struct wg_transform *transform = get_transform(params->transform); struct wg_sample *sample = params->sample; + const gchar *input_mime; + GstVideoInfo video_info; GstBuffer *buffer; guint length;
@@ -734,6 +762,14 @@ NTSTATUS wg_transform_push_data(void *args) GST_INFO("Wrapped %u/%u bytes from sample %p to %"GST_PTR_FORMAT, sample->size, sample->max_size, sample, buffer); }
+ input_mime = gst_structure_get_name(gst_caps_get_structure(transform->input_caps, 0)); + if (!strcmp(input_mime, "video/x-raw") && gst_video_info_from_caps(&video_info, transform->input_caps)) + { + GstVideoAlignment align; + align_video_info_planes(&transform->input_info, 0, &video_info, &align); + buffer_add_video_meta(buffer, &video_info); + } + if (sample->flags & WG_SAMPLE_FLAG_HAS_PTS) GST_BUFFER_PTS(buffer) = sample->pts * 100; if (sample->flags & WG_SAMPLE_FLAG_HAS_DURATION) @@ -969,13 +1005,21 @@ NTSTATUS wg_transform_read_data(void *args) if (!strcmp(output_mime, "video/x-raw")) { gsize plane_align = transform->attrs.output_plane_align; + GstVideoMeta *meta;
if (!gst_video_info_from_caps(&src_video_info, output_caps)) GST_ERROR("Failed to get video info from %"GST_PTR_FORMAT, output_caps); dst_video_info = src_video_info;
- /* set the desired output buffer alignment on the dest video info */ - align_video_info_planes(plane_align, &dst_video_info, &align); + /* set the desired output buffer alignment and stride on the dest video info */ + align_video_info_planes(&transform->output_info, plane_align, &dst_video_info, &align); + + /* copy the actual output buffer alignment and stride to the src video info */ + if ((meta = gst_buffer_get_video_meta(output_buffer))) + { + memcpy(src_video_info.offset, meta->offset, sizeof(meta->offset)); + memcpy(src_video_info.stride, meta->stride, sizeof(meta->stride)); + } }
if (GST_MINI_OBJECT_FLAG_IS_SET(transform->output_sample, GST_SAMPLE_FLAG_WG_CAPS_CHANGED))
From: Rémi Bernon <rbernon@codeweavers.com>
--- dlls/winegstreamer/video_processor.c | 42 +++++++++++++++++----------- 1 file changed, 25 insertions(+), 17 deletions(-)
diff --git a/dlls/winegstreamer/video_processor.c b/dlls/winegstreamer/video_processor.c index 4afdddcb036..5555cef8303 100644 --- a/dlls/winegstreamer/video_processor.c +++ b/dlls/winegstreamer/video_processor.c @@ -91,34 +91,47 @@ struct video_processor IMFVideoSampleAllocatorEx *allocator; };
-static HRESULT normalize_stride(IMFMediaType *media_type, BOOL bottom_up, IMFMediaType **ret) +static HRESULT normalize_media_types(BOOL bottom_up, IMFMediaType **input_type, IMFMediaType **output_type) { - MFVIDEOFORMAT *format; - LONG stride; + MFVIDEOFORMAT *input_format, *output_format; + BOOL normalize_input, normalize_output; UINT32 size; HRESULT hr;
- if (SUCCEEDED(hr = IMFMediaType_GetUINT32(media_type, &MF_MT_DEFAULT_STRIDE, (UINT32 *)&stride))) + normalize_input = FAILED(IMFMediaType_GetItem(*input_type, &MF_MT_DEFAULT_STRIDE, NULL)); + normalize_output = FAILED(IMFMediaType_GetItem(*output_type, &MF_MT_DEFAULT_STRIDE, NULL)); + + if (FAILED(hr = MFCreateMFVideoFormatFromMFMediaType(*input_type, &input_format, &size))) + return hr; + if (FAILED(hr = MFCreateMFVideoFormatFromMFMediaType(*output_type, &output_format, &size))) { - *ret = media_type; - IMFMediaType_AddRef(media_type); + CoTaskMemFree(input_format); return hr; }
- if (SUCCEEDED(hr = MFCreateMFVideoFormatFromMFMediaType(media_type, &format, &size))) + if (bottom_up && normalize_input) + input_format->videoInfo.VideoFlags |= MFVideoFlag_BottomUpLinearRep; + if (bottom_up && normalize_output) + output_format->videoInfo.VideoFlags |= MFVideoFlag_BottomUpLinearRep; + + if (FAILED(hr = MFCreateVideoMediaType(input_format, (IMFVideoMediaType **)input_type))) + goto done; + if (FAILED(hr = MFCreateVideoMediaType(output_format, (IMFVideoMediaType **)output_type))) { - if (bottom_up) format->videoInfo.VideoFlags |= MFVideoFlag_BottomUpLinearRep; - hr = MFCreateVideoMediaType(format, (IMFVideoMediaType **)ret); - CoTaskMemFree(format); + IMFMediaType_Release(*input_type); + *input_type = NULL; }
+done: + CoTaskMemFree(input_format); + CoTaskMemFree(output_format); return hr; }
static HRESULT try_create_wg_transform(struct video_processor *impl) { BOOL bottom_up = !impl->device_manager; /* when not D3D-enabled, the transform outputs bottom up RGB buffers */ - IMFMediaType *input_type, *output_type; + IMFMediaType *input_type = impl->input_type, *output_type = impl->output_type; struct wg_transform_attrs attrs = {0}; HRESULT hr;
@@ -128,13 +141,8 @@ static HRESULT try_create_wg_transform(struct video_processor *impl) impl->wg_transform = 0; }
- if (FAILED(hr = normalize_stride(impl->input_type, bottom_up, &input_type))) - return hr; - if (FAILED(hr = normalize_stride(impl->output_type, bottom_up, &output_type))) - { - IMFMediaType_Release(input_type); + if (FAILED(hr = normalize_media_types(bottom_up, &input_type, &output_type))) return hr; - } hr = wg_transform_create_mf(input_type, output_type, &attrs, &impl->wg_transform); IMFMediaType_Release(output_type); IMFMediaType_Release(input_type);
From: Rémi Bernon <rbernon@codeweavers.com>
--- dlls/winegstreamer/Makefile.in | 2 +- dlls/winegstreamer/color_convert.c | 58 +++++++++++++++++++++++++++- dlls/winegstreamer/gst_private.h | 23 +++++++++++ dlls/winegstreamer/video_processor.c | 22 +++++++++++ 4 files changed, 103 insertions(+), 2 deletions(-)
diff --git a/dlls/winegstreamer/Makefile.in b/dlls/winegstreamer/Makefile.in index 447c2c74cf3..63ca3f61fdf 100644 --- a/dlls/winegstreamer/Makefile.in +++ b/dlls/winegstreamer/Makefile.in @@ -1,7 +1,7 @@ MODULE = winegstreamer.dll UNIXLIB = winegstreamer.so IMPORTLIB = winegstreamer -IMPORTS = strmbase ole32 oleaut32 msdmo msvcrt +IMPORTS = strmbase ole32 oleaut32 msdmo msvcrt user32 DELAYIMPORTS = mfplat mf UNIX_CFLAGS = $(GSTREAMER_CFLAGS) UNIX_LIBS = $(GSTREAMER_LIBS) $(PTHREAD_LIBS) diff --git a/dlls/winegstreamer/color_convert.c b/dlls/winegstreamer/color_convert.c index 21d2aa67e64..e466a9289b1 100644 --- a/dlls/winegstreamer/color_convert.c +++ b/dlls/winegstreamer/color_convert.c @@ -95,9 +95,59 @@ static inline struct color_convert *impl_from_IUnknown(IUnknown *iface) return CONTAINING_RECORD(iface, struct color_convert, IUnknown_inner); }
+static void update_video_aperture(MFVideoInfo *input_info, MFVideoInfo *output_info) +{ + static const MFVideoArea empty_area = {0}; + + /* Tests show that the color converter ignores aperture entirely, probably a side + * effect of an internal conversion to VIDEOINFOHEADER2, as the component is also + * exposing a IMediaObject interface, and designed for dshow. + */ + + input_info->GeometricAperture = empty_area; + input_info->MinimumDisplayAperture = empty_area; + input_info->PanScanAperture = empty_area; + + output_info->GeometricAperture = empty_area; + output_info->MinimumDisplayAperture = empty_area; + output_info->PanScanAperture = empty_area; +} + +static HRESULT normalize_media_types(IMFMediaType **input_type, IMFMediaType **output_type) +{ + MFVIDEOFORMAT *input_format, *output_format; + UINT32 size; + HRESULT hr; + + if (FAILED(hr = MFCreateMFVideoFormatFromMFMediaType(*input_type, &input_format, &size))) + return hr; + if (FAILED(hr = MFCreateMFVideoFormatFromMFMediaType(*output_type, &output_format, &size))) + { + CoTaskMemFree(input_format); + return hr; + } + + update_video_aperture(&input_format->videoInfo, &output_format->videoInfo); + + if (FAILED(hr = MFCreateVideoMediaType(input_format, (IMFVideoMediaType **)input_type))) + goto done; + if (FAILED(hr = MFCreateVideoMediaType(output_format, (IMFVideoMediaType **)output_type))) + { + IMFMediaType_Release(*input_type); + *input_type = NULL; + } + +done: + CoTaskMemFree(input_format); + CoTaskMemFree(output_format); + return hr; +} + static HRESULT try_create_wg_transform(struct color_convert *impl) { + IMFMediaType *input_type = impl->input_type, *output_type = impl->output_type; struct wg_transform_attrs attrs = {0}; + HRESULT hr;
if (impl->wg_transform) { @@ -105,7 +155,13 @@ static HRESULT try_create_wg_transform(struct color_convert *impl) impl->wg_transform = 0; }
- return wg_transform_create_mf(impl->input_type, impl->output_type, &attrs, &impl->wg_transform); + if (FAILED(hr = normalize_media_types(&input_type, &output_type))) + return hr; + hr = wg_transform_create_mf(input_type, output_type, &attrs, &impl->wg_transform); + IMFMediaType_Release(output_type); + IMFMediaType_Release(input_type); + + return hr; }
static HRESULT WINAPI unknown_QueryInterface(IUnknown *iface, REFIID iid, void **out) diff --git a/dlls/winegstreamer/gst_private.h b/dlls/winegstreamer/gst_private.h index 142de9bce9f..0f7d945ba37 100644 --- a/dlls/winegstreamer/gst_private.h +++ b/dlls/winegstreamer/gst_private.h @@ -43,6 +43,29 @@ bool array_reserve(void **elements, size_t *capacity, size_t count, size_t size)
#define MEDIATIME_FROM_BYTES(x) ((LONGLONG)(x) * 10000000)
+static inline BOOL is_mf_video_area_empty(const MFVideoArea *area) +{ + return !area->OffsetX.value && !area->OffsetY.value && !area->Area.cx && !area->Area.cy; +} + +static inline void get_mf_video_content_rect(const MFVideoInfo *info, RECT *rect) +{ + if (!is_mf_video_area_empty(&info->MinimumDisplayAperture)) + { + rect->left = info->MinimumDisplayAperture.OffsetX.value; + rect->top = info->MinimumDisplayAperture.OffsetY.value; + rect->right = rect->left + info->MinimumDisplayAperture.Area.cx; + rect->bottom = rect->top + info->MinimumDisplayAperture.Area.cy; + } + else + { + rect->left = 0; + rect->top = 0; + rect->right = info->dwWidth; + rect->bottom = info->dwHeight; + } +} + struct wg_sample_queue;
HRESULT wg_sample_queue_create(struct wg_sample_queue **out); diff --git a/dlls/winegstreamer/video_processor.c b/dlls/winegstreamer/video_processor.c index 5555cef8303..3cb2c1a5bc6 100644 --- a/dlls/winegstreamer/video_processor.c +++ b/dlls/winegstreamer/video_processor.c @@ -91,6 +91,26 @@ struct video_processor IMFVideoSampleAllocatorEx *allocator; };
+static void update_video_aperture(MFVideoInfo *input_info, MFVideoInfo *output_info) +{ + RECT input_rect, output_rect; + + get_mf_video_content_rect(input_info, &input_rect); + get_mf_video_content_rect(output_info, &output_rect); + + if (!EqualRect(&input_rect, &output_rect)) + { + FIXME("Mismatched content size %s vs %s\n", wine_dbgstr_rect(&input_rect), + wine_dbgstr_rect(&output_rect)); + } + + input_info->MinimumDisplayAperture.OffsetX.value = input_rect.left; + input_info->MinimumDisplayAperture.OffsetY.value = input_rect.top; + input_info->MinimumDisplayAperture.Area.cx = input_rect.right - input_rect.left; + input_info->MinimumDisplayAperture.Area.cy = input_rect.bottom - input_rect.top; + output_info->MinimumDisplayAperture = input_info->MinimumDisplayAperture; +} + static HRESULT normalize_media_types(BOOL bottom_up, IMFMediaType **input_type, IMFMediaType **output_type) { MFVIDEOFORMAT *input_format, *output_format; @@ -114,6 +134,8 @@ static HRESULT normalize_media_types(BOOL bottom_up, IMFMediaType **input_type, if (bottom_up && normalize_output) output_format->videoInfo.VideoFlags |= MFVideoFlag_BottomUpLinearRep;
+ update_video_aperture(&input_format->videoInfo, &output_format->videoInfo); + if (FAILED(hr = MFCreateVideoMediaType(input_format, (IMFVideoMediaType **)input_type))) goto done; if (FAILED(hr = MFCreateVideoMediaType(output_format, (IMFVideoMediaType **)output_type)))
From: Rémi Bernon <rbernon@codeweavers.com>
--- dlls/mf/tests/transform.c | 50 +++++++++++------------------- dlls/winegstreamer/unix_private.h | 5 +++ dlls/winegstreamer/wg_media_type.c | 6 ++++ dlls/winegstreamer/wg_transform.c | 18 +++++++++++ 4 files changed, 47 insertions(+), 32 deletions(-)
diff --git a/dlls/mf/tests/transform.c b/dlls/mf/tests/transform.c index deec2c7dd92..ada915a527a 100644 --- a/dlls/mf/tests/transform.c +++ b/dlls/mf/tests/transform.c @@ -7704,7 +7704,7 @@ static void test_video_processor(void) { .input_type_desc = nv12_with_aperture, .input_bitmap = L"nv12frame.bmp", .output_type_desc = rgb32_no_aperture, .output_bitmap = L"rgb32frame-crop-flip.bmp", - .output_sample_desc = &rgb32_crop_sample_desc, + .output_sample_desc = &rgb32_crop_sample_desc, .delta = 2, /* Windows returns 0, Wine needs 2 */ }, { .input_type_desc = rgb32_no_aperture, .input_bitmap = L"rgb32frame-crop-flip.bmp", @@ -8060,23 +8060,6 @@ static void test_video_processor(void) check_mft_set_input_type(transform, test->input_type_desc, S_OK); check_mft_get_input_current_type(transform, test->input_type_desc);
- if (i >= 15) - { - IMFMediaType *media_type; - HRESULT hr; - - hr = MFCreateMediaType(&media_type); - ok(hr == S_OK, "MFCreateMediaType returned hr %#lx.\n", hr); - init_media_type(media_type, test->output_type_desc, -1); - hr = IMFTransform_SetOutputType(transform, 0, media_type, 0); - todo_wine - ok(hr == S_OK, "SetOutputType returned %#lx.\n", hr); - IMFMediaType_Release(media_type); - - if (hr != S_OK) - goto skip_test; - } - check_mft_set_output_type_required(transform, test->output_type_desc); check_mft_set_output_type(transform, test->output_type_desc, S_OK); check_mft_get_output_current_type(transform, test->output_type_desc); @@ -8188,7 +8171,6 @@ static void test_video_processor(void) ret = IMFSample_Release(output_sample); ok(ret == 0, "Release returned %lu\n", ret);
-skip_test: winetest_pop_context();
hr = IMFTransform_SetInputType(transform, 0, NULL, 0); @@ -8213,8 +8195,8 @@ skip_test: check_mft_set_output_type(transform, rgb32_no_aperture, S_OK); check_mft_get_output_current_type(transform, rgb32_no_aperture);
- check_mft_set_input_type_(__LINE__, transform, nv12_with_aperture, S_OK, TRUE); - check_mft_get_input_current_type_(__LINE__, transform, nv12_with_aperture, TRUE, FALSE); + check_mft_set_input_type(transform, nv12_with_aperture, S_OK); + check_mft_get_input_current_type(transform, nv12_with_aperture);
/* output type is the same as before */ check_mft_get_output_current_type(transform, rgb32_no_aperture); @@ -8879,7 +8861,13 @@ static void test_h264_with_dxgi_manager(void)
status = 0; hr = get_next_h264_output_sample(transform, &input_sample, NULL, output, &data, &data_len); + todo_wine_if(hr == MF_E_UNEXPECTED) /* with some llvmpipe versions */ ok(hr == S_OK, "got %#lx\n", hr); + if (hr == MF_E_UNEXPECTED) + { + IMFSample_Release(input_sample); + goto failed; + } ok(sample != output[0].pSample, "got %p.\n", output[0].pSample); sample = output[0].pSample;
@@ -9524,7 +9512,7 @@ static void test_video_processor_with_dxgi_manager(void) /* check RGB32 output aperture cropping with D3D buffers */
check_mft_set_input_type(transform, nv12_with_aperture, S_OK); - check_mft_set_output_type_(__LINE__, transform, rgb32_no_aperture, S_OK, TRUE); + check_mft_set_output_type(transform, rgb32_no_aperture, S_OK);
load_resource(L"nv12frame.bmp", &nv12frame_data, &nv12frame_data_len); /* skip BMP header and RGB data from the dump */ @@ -9536,7 +9524,7 @@ static void test_video_processor_with_dxgi_manager(void) input_sample = create_d3d_sample(allocator, nv12frame_data, nv12frame_data_len);
hr = IMFTransform_ProcessInput(transform, 0, input_sample, 0); - todo_wine ok(hr == S_OK, "got %#lx\n", hr); + ok(hr == S_OK, "got %#lx\n", hr);
hr = IMFTransform_GetOutputStreamInfo(transform, 0, &info); ok(hr == S_OK, "got %#lx\n", hr); @@ -9545,9 +9533,9 @@ static void test_video_processor_with_dxgi_manager(void) status = 0; memset(&output, 0, sizeof(output)); hr = IMFTransform_ProcessOutput(transform, 0, 1, &output, &status); - todo_wine ok(hr == S_OK, "got %#lx\n", hr); + ok(hr == S_OK, "got %#lx\n", hr); ok(!output.pEvents, "got events\n"); - todo_wine ok(!!output.pSample, "got no sample\n"); + ok(!!output.pSample, "got no sample\n"); ok(output.dwStatus == 0, "got %#lx\n", output.dwStatus); ok(status == 0, "got %#lx\n", status); if (!output.pSample) goto skip_rgb32; @@ -9582,7 +9570,6 @@ static void test_video_processor_with_dxgi_manager(void) IMFSample_Release(output.pSample);
ret = check_mf_sample_collection(output_samples, &output_sample_desc_rgb32_crop, L"rgb32frame-crop.bmp"); - todo_wine /* FIXME: video process vertically flips the frame... */ ok(ret <= 5, "got %lu%% diff\n", ret);
IMFCollection_Release(output_samples); @@ -9592,7 +9579,7 @@ skip_rgb32: /* check ABGR32 output with D3D buffers */
check_mft_set_input_type(transform, nv12_with_aperture, S_OK); - check_mft_set_output_type_(__LINE__, transform, abgr32_no_aperture, S_OK, TRUE); + check_mft_set_output_type(transform, abgr32_no_aperture, S_OK);
load_resource(L"nv12frame.bmp", &nv12frame_data, &nv12frame_data_len); /* skip BMP header and RGB data from the dump */ @@ -9604,7 +9591,7 @@ skip_rgb32: input_sample = create_d3d_sample(allocator, nv12frame_data, nv12frame_data_len);
hr = IMFTransform_ProcessInput(transform, 0, input_sample, 0); - todo_wine ok(hr == S_OK, "got %#lx\n", hr); + ok(hr == S_OK, "got %#lx\n", hr);
hr = IMFTransform_GetOutputStreamInfo(transform, 0, &info); ok(hr == S_OK, "got %#lx\n", hr); @@ -9613,9 +9600,9 @@ skip_rgb32: status = 0; memset(&output, 0, sizeof(output)); hr = IMFTransform_ProcessOutput(transform, 0, 1, &output, &status); - todo_wine ok(hr == S_OK, "got %#lx\n", hr); + ok(hr == S_OK, "got %#lx\n", hr); ok(!output.pEvents, "got events\n"); - todo_wine ok(!!output.pSample, "got no sample\n"); + ok(!!output.pSample, "got no sample\n"); ok(output.dwStatus == 0, "got %#lx\n", output.dwStatus); ok(status == 0, "got %#lx\n", status); if (!output.pSample) goto skip_abgr32; @@ -9631,7 +9618,7 @@ skip_rgb32: ID3D11Texture2D_GetDesc(tex2d, &desc); ok(desc.Format == DXGI_FORMAT_R8G8B8A8_UNORM, "got %#x.\n", desc.Format); ok(!desc.Usage, "got %u.\n", desc.Usage); - ok(desc.BindFlags == D3D11_BIND_RENDER_TARGET, "got %#x.\n", desc.BindFlags); + todo_wine ok(desc.BindFlags == D3D11_BIND_RENDER_TARGET, "got %#x.\n", desc.BindFlags); ok(!desc.CPUAccessFlags, "got %#x.\n", desc.CPUAccessFlags); ok(!desc.MiscFlags, "got %#x.\n", desc.MiscFlags); ok(desc.MipLevels == 1, "git %u.\n", desc.MipLevels); @@ -9650,7 +9637,6 @@ skip_rgb32: IMFSample_Release(output.pSample);
ret = check_mf_sample_collection(output_samples, &output_sample_desc_abgr32_crop, L"abgr32frame-crop.bmp"); - todo_wine /* FIXME: video process vertically flips the frame... */ ok(ret <= 8 /* NVIDIA needs 5, AMD needs 8 */, "got %lu%% diff\n", ret);
IMFCollection_Release(output_samples); diff --git a/dlls/winegstreamer/unix_private.h b/dlls/winegstreamer/unix_private.h index 985b70a925c..0c285457d78 100644 --- a/dlls/winegstreamer/unix_private.h +++ b/dlls/winegstreamer/unix_private.h @@ -69,6 +69,11 @@ extern NTSTATUS wg_transform_notify_qos(void *args);
/* wg_media_type.c */
+static inline BOOL is_mf_video_area_empty(const MFVideoArea *area) +{ + return !area->OffsetX.value && !area->OffsetY.value && !area->Area.cx && !area->Area.cy; +} + extern GstCaps *caps_from_media_type(const struct wg_media_type *media_type); extern NTSTATUS caps_to_media_type(GstCaps *caps, struct wg_media_type *media_type, UINT32 video_plane_align); diff --git a/dlls/winegstreamer/wg_media_type.c b/dlls/winegstreamer/wg_media_type.c index 14fc1a9cdf4..5b9f59258a1 100644 --- a/dlls/winegstreamer/wg_media_type.c +++ b/dlls/winegstreamer/wg_media_type.c @@ -410,6 +410,12 @@ static GstCaps *caps_from_video_format(const MFVIDEOFORMAT *format, UINT32 forma format->videoInfo.FramesPerSecond.Numerator, format->videoInfo.FramesPerSecond.Denominator, NULL);
+ if (!is_mf_video_area_empty(&format->videoInfo.MinimumDisplayAperture)) + { + gst_caps_set_simple(caps, "width", G_TYPE_INT, format->videoInfo.MinimumDisplayAperture.Area.cx, NULL); + gst_caps_set_simple(caps, "height", G_TYPE_INT, format->videoInfo.MinimumDisplayAperture.Area.cy, NULL); + } + if (video_format == GST_VIDEO_FORMAT_ENCODED) init_caps_from_video_subtype(caps, &format->guidFormat, format, format_size);
diff --git a/dlls/winegstreamer/wg_transform.c b/dlls/winegstreamer/wg_transform.c index 2537331a118..cd6bb9c8f52 100644 --- a/dlls/winegstreamer/wg_transform.c +++ b/dlls/winegstreamer/wg_transform.c @@ -72,10 +72,28 @@ static struct wg_transform *get_transform(wg_transform_t trans) static void align_video_info_planes(MFVideoInfo *video_info, gsize plane_align, GstVideoInfo *info, GstVideoAlignment *align) { + const MFVideoArea *aperture = &video_info->MinimumDisplayAperture; + gst_video_alignment_reset(align);
align->padding_right = ((plane_align + 1) - (info->width & plane_align)) & plane_align; align->padding_bottom = ((plane_align + 1) - (info->height & plane_align)) & plane_align; + + if (!is_mf_video_area_empty(aperture)) + { + align->padding_right = max(align->padding_right, video_info->dwWidth - aperture->OffsetX.value - aperture->Area.cx); + align->padding_bottom = max(align->padding_bottom, video_info->dwHeight - aperture->OffsetY.value - aperture->Area.cy); + align->padding_top = aperture->OffsetY.value; + align->padding_left = aperture->OffsetX.value; + } + + if (video_info->VideoFlags & MFVideoFlag_BottomUpLinearRep) + { + gsize top = align->padding_top; + align->padding_top = align->padding_bottom; + align->padding_bottom = top; + } + align->stride_align[0] = plane_align; align->stride_align[1] = plane_align; align->stride_align[2] = plane_align;
On Wed Jul 3 05:36:55 2024 +0000, Rémi Bernon wrote:
I dropped the color converter normalization; I don't think it is used very often — at least not in the MF pipelines — so I don't really need it to be changed.
Actually, we need to keep some fixups for the tests to pass. I've changed it to drop all aperture information, as your tests suggest it should.
This merge request was approved by Elizabeth Figura.