From: Rémi Bernon <rbernon@codeweavers.com>
---
 dlls/winegstreamer/wg_transform.c | 11 ++++++++---
 1 file changed, 8 insertions(+), 3 deletions(-)
diff --git a/dlls/winegstreamer/wg_transform.c b/dlls/winegstreamer/wg_transform.c
index 614125522a8..018131f4f97 100644
--- a/dlls/winegstreamer/wg_transform.c
+++ b/dlls/winegstreamer/wg_transform.c
@@ -131,6 +131,7 @@ static gboolean transform_sink_query_allocation(struct wg_transform *transform,
     gsize plane_align = transform->attrs.output_plane_align;
     GstStructure *config, *params;
     GstVideoAlignment align;
+    const char *mime_type;
     gboolean needs_pool;
     GstBufferPool *pool;
     GstVideoInfo info;
@@ -139,7 +140,9 @@ static gboolean transform_sink_query_allocation(struct wg_transform *transform,
     GST_LOG("transform %p, %"GST_PTR_FORMAT, transform, query);
 
     gst_query_parse_allocation(query, &caps, &needs_pool);
-    if (stream_type_from_caps(caps) != GST_STREAM_TYPE_VIDEO || !needs_pool)
+
+    mime_type = gst_structure_get_name(gst_caps_get_structure(caps, 0));
+    if (strcmp(mime_type, "video/x-raw") || !needs_pool)
         return false;
 
     if (!gst_video_info_from_caps(&info, caps)
@@ -912,6 +915,7 @@ NTSTATUS wg_transform_read_data(void *args)
     struct wg_sample *sample = params->sample;
     GstVideoAlignment align = {0};
     GstBuffer *output_buffer;
+    const char *output_mime;
     GstCaps *output_caps;
     bool discard_data;
     NTSTATUS status;
@@ -927,8 +931,9 @@ NTSTATUS wg_transform_read_data(void *args)
 
     output_buffer = gst_sample_get_buffer(transform->output_sample);
     output_caps = gst_sample_get_caps(transform->output_sample);
+    output_mime = gst_structure_get_name(gst_caps_get_structure(output_caps, 0));
 
-    if (stream_type_from_caps(output_caps) == GST_STREAM_TYPE_VIDEO)
+    if (!strcmp(output_mime, "video/x-raw"))
     {
         gsize plane_align = transform->attrs.output_plane_align;
 
@@ -949,7 +954,7 @@ NTSTATUS wg_transform_read_data(void *args)
         return STATUS_SUCCESS;
     }
 
-    if (stream_type_from_caps(output_caps) == GST_STREAM_TYPE_VIDEO)
+    if (!strcmp(output_mime, "video/x-raw"))
         status = read_transform_output_video(sample, output_buffer,
                 &src_video_info, &dst_video_info);
     else
From: Rémi Bernon <rbernon@codeweavers.com>
---
 dlls/winegstreamer/wg_transform.c | 84 +++++++++++++++++++++----------
 1 file changed, 58 insertions(+), 26 deletions(-)
diff --git a/dlls/winegstreamer/wg_transform.c b/dlls/winegstreamer/wg_transform.c
index 018131f4f97..a6f12d41080 100644
--- a/dlls/winegstreamer/wg_transform.c
+++ b/dlls/winegstreamer/wg_transform.c
@@ -82,6 +82,57 @@ static void align_video_info_planes(gsize plane_align, GstVideoInfo *info, GstVi
     gst_video_info_align(info, align);
 }
 
+typedef struct
+{
+    GstVideoBufferPool parent;
+    GstVideoInfo info;
+} WgVideoBufferPool;
+
+typedef struct
+{
+    GstVideoBufferPoolClass parent_class;
+} WgVideoBufferPoolClass;
+
+G_DEFINE_TYPE(WgVideoBufferPool, wg_video_buffer_pool, GST_TYPE_VIDEO_BUFFER_POOL);
+
+static void wg_video_buffer_pool_init(WgVideoBufferPool *pool)
+{
+}
+
+static void wg_video_buffer_pool_class_init(WgVideoBufferPoolClass *klass)
+{
+}
+
+static WgVideoBufferPool *wg_video_buffer_pool_create(GstCaps *caps, gsize plane_align,
+        GstAllocator *allocator, GstVideoAlignment *align)
+{
+    WgVideoBufferPool *pool;
+    GstStructure *config;
+
+    if (!(pool = g_object_new(wg_video_buffer_pool_get_type(), NULL)))
+        return NULL;
+
+    gst_video_info_from_caps(&pool->info, caps);
+    align_video_info_planes(plane_align, &pool->info, align);
+
+    if (!(config = gst_buffer_pool_get_config(GST_BUFFER_POOL(pool))))
+        GST_ERROR("Failed to get %"GST_PTR_FORMAT" config.", pool);
+    else
+    {
+        gst_buffer_pool_config_add_option(config, GST_BUFFER_POOL_OPTION_VIDEO_META);
+        gst_buffer_pool_config_add_option(config, GST_BUFFER_POOL_OPTION_VIDEO_ALIGNMENT);
+        gst_buffer_pool_config_set_video_alignment(config, align);
+
+        gst_buffer_pool_config_set_params(config, caps, pool->info.size, 0, 0);
+        gst_buffer_pool_config_set_allocator(config, allocator, NULL);
+        if (!gst_buffer_pool_set_config(GST_BUFFER_POOL(pool), config))
+            GST_ERROR("Failed to set %"GST_PTR_FORMAT" config.", pool);
+    }
+
+    GST_INFO("Created %"GST_PTR_FORMAT, pool);
+    return pool;
+}
+
 static GstFlowReturn transform_sink_chain_cb(GstPad *pad, GstObject *parent, GstBuffer *buffer)
 {
     struct wg_transform *transform = gst_pad_get_element_private(pad);
@@ -128,13 +179,11 @@ static gboolean transform_src_query_cb(GstPad *pad, GstObject *parent, GstQuery
 
 static gboolean transform_sink_query_allocation(struct wg_transform *transform, GstQuery *query)
 {
-    gsize plane_align = transform->attrs.output_plane_align;
-    GstStructure *config, *params;
+    WgVideoBufferPool *pool;
     GstVideoAlignment align;
     const char *mime_type;
+    GstStructure *params;
     gboolean needs_pool;
-    GstBufferPool *pool;
-    GstVideoInfo info;
     GstCaps *caps;
 
     GST_LOG("transform %p, %"GST_PTR_FORMAT, transform, query);
@@ -145,12 +194,10 @@ static gboolean transform_sink_query_allocation(struct wg_transform *transform,
     if (strcmp(mime_type, "video/x-raw") || !needs_pool)
         return false;
 
-    if (!gst_video_info_from_caps(&info, caps)
-        || !(pool = gst_video_buffer_pool_new()))
+    if (!(pool = wg_video_buffer_pool_create(caps, transform->attrs.output_plane_align,
+            transform->allocator, &align)))
         return false;
 
-    align_video_info_planes(plane_align, &info, &align);
-
     if ((params = gst_structure_new("video-meta",
             "padding-top", G_TYPE_UINT, align.padding_top,
             "padding-bottom", G_TYPE_UINT, align.padding_bottom,
@@ -162,30 +209,15 @@ static gboolean transform_sink_query_allocation(struct wg_transform *transform,
         gst_structure_free(params);
     }
 
-    if (!(config = gst_buffer_pool_get_config(pool)))
-        GST_ERROR("Failed to get %"GST_PTR_FORMAT" config.", pool);
-    else
-    {
-        gst_buffer_pool_config_add_option(config, GST_BUFFER_POOL_OPTION_VIDEO_META);
-        gst_buffer_pool_config_add_option(config, GST_BUFFER_POOL_OPTION_VIDEO_ALIGNMENT);
-        gst_buffer_pool_config_set_video_alignment(config, &align);
-
-        gst_buffer_pool_config_set_params(config, caps,
-                info.size, 0, 0);
-        gst_buffer_pool_config_set_allocator(config, transform->allocator, NULL);
-        if (!gst_buffer_pool_set_config(pool, config))
-            GST_ERROR("Failed to set %"GST_PTR_FORMAT" config.", pool);
-    }
-
     /* Prevent pool reconfiguration, we don't want another alignment. */
-    if (!gst_buffer_pool_set_active(pool, true))
+    if (!gst_buffer_pool_set_active(GST_BUFFER_POOL(pool), true))
         GST_ERROR("%"GST_PTR_FORMAT" failed to activate.", pool);
 
-    gst_query_add_allocation_pool(query, pool, info.size, 0, 0);
+    gst_query_add_allocation_pool(query, GST_BUFFER_POOL(pool), pool->info.size, 0, 0);
     gst_query_add_allocation_param(query, transform->allocator, NULL);
 
     GST_INFO("Proposing %"GST_PTR_FORMAT", buffer size %#zx, %"GST_PTR_FORMAT", for %"GST_PTR_FORMAT,
-            pool, info.size, transform->allocator, query);
+            pool, pool->info.size, transform->allocator, query);
 
     g_object_unref(pool);
     return true;
From: Rémi Bernon <rbernon@codeweavers.com>
---
 dlls/winegstreamer/wg_transform.c | 39 ++++++++++++++++---------------
 1 file changed, 20 insertions(+), 19 deletions(-)
diff --git a/dlls/winegstreamer/wg_transform.c b/dlls/winegstreamer/wg_transform.c
index a6f12d41080..ac036fcce59 100644
--- a/dlls/winegstreamer/wg_transform.c
+++ b/dlls/winegstreamer/wg_transform.c
@@ -61,6 +61,7 @@ struct wg_transform
     bool output_caps_changed;
     GstCaps *desired_caps;
    GstCaps *output_caps;
+    GstCaps *input_caps;
 };
 
 static struct wg_transform *get_transform(wg_transform_t trans)
@@ -365,6 +366,7 @@ NTSTATUS wg_transform_destroy(void *args)
     gst_query_unref(transform->drain_query);
     gst_caps_unref(transform->desired_caps);
     gst_caps_unref(transform->output_caps);
+    gst_caps_unref(transform->input_caps);
     gst_atomic_queue_unref(transform->output_queue);
     free(transform);
 
@@ -405,7 +407,7 @@ NTSTATUS wg_transform_create(void *args)
 {
     struct wg_transform_create_params *params = args;
     GstElement *first = NULL, *last = NULL, *element;
-    GstCaps *sink_caps = NULL, *src_caps = NULL, *parsed_caps = NULL;
+    GstCaps *sink_caps = NULL, *parsed_caps = NULL;
     NTSTATUS status = STATUS_UNSUCCESSFUL;
     const gchar *input_mime, *output_mime;
     GstPadTemplate *template = NULL;
@@ -427,27 +429,31 @@ NTSTATUS wg_transform_create(void *args)
         goto out;
     transform->attrs = params->attrs;
 
+    if (!(transform->input_caps = caps_from_media_type(&params->input_type)))
+        goto out;
+    GST_INFO("transform %p input caps %"GST_PTR_FORMAT, transform, transform->input_caps);
+    input_mime = gst_structure_get_name(gst_caps_get_structure(transform->input_caps, 0));
+
+    if (!(transform->output_caps = caps_from_media_type(&params->output_type)))
+        goto out;
+    GST_INFO("transform %p output caps %"GST_PTR_FORMAT, transform, transform->output_caps);
+    output_mime = gst_structure_get_name(gst_caps_get_structure(transform->output_caps, 0));
+
     if (IsEqualGUID(&params->input_type.major, &MFMediaType_Video))
         transform->input_info = params->input_type.u.video->videoInfo;
     if (IsEqualGUID(&params->output_type.major, &MFMediaType_Video))
         output_info = params->output_type.u.video->videoInfo;
 
-    if (!(src_caps = caps_from_media_type(&params->input_type)))
-        goto out;
-    if (!(template = gst_pad_template_new("src", GST_PAD_SRC, GST_PAD_ALWAYS, src_caps)))
+    if (!(template = gst_pad_template_new("src", GST_PAD_SRC, GST_PAD_ALWAYS, transform->input_caps)))
         goto out;
     transform->my_src = gst_pad_new_from_template(template, "src");
     g_object_unref(template);
     if (!transform->my_src)
         goto out;
 
-    GST_INFO("transform %p input caps %"GST_PTR_FORMAT, transform, src_caps);
-
     gst_pad_set_element_private(transform->my_src, transform);
     gst_pad_set_query_function(transform->my_src, transform_src_query_cb);
 
-    if (!(transform->output_caps = caps_from_media_type(&params->output_type)))
-        goto out;
     transform->desired_caps = gst_caps_ref(transform->output_caps);
     if (!(template = gst_pad_template_new("sink", GST_PAD_SINK, GST_PAD_ALWAYS, transform->output_caps)))
         goto out;
@@ -456,34 +462,30 @@ NTSTATUS wg_transform_create(void *args)
     if (!transform->my_sink)
         goto out;
 
-    GST_INFO("transform %p output caps %"GST_PTR_FORMAT, transform, transform->output_caps);
-
     gst_pad_set_element_private(transform->my_sink, transform);
     gst_pad_set_event_function(transform->my_sink, transform_sink_event_cb);
     gst_pad_set_query_function(transform->my_sink, transform_sink_query_cb);
     gst_pad_set_chain_function(transform->my_sink, transform_sink_chain_cb);
 
-    input_mime = gst_structure_get_name(gst_caps_get_structure(src_caps, 0));
-    if (!(parsed_caps = transform_get_parsed_caps(src_caps, input_mime)))
+    if (!(parsed_caps = transform_get_parsed_caps(transform->input_caps, input_mime)))
         goto out;
 
     /* Since we append conversion elements, we don't want to filter decoders
      * based on the actual output caps now. Matching decoders with the
      * raw output media type should be enough. */
-    output_mime = gst_structure_get_name(gst_caps_get_structure(transform->output_caps, 0));
     if (!(sink_caps = gst_caps_new_empty_simple(output_mime)))
         goto out;
 
     if (strcmp(input_mime, "audio/x-raw") && strcmp(input_mime, "video/x-raw"))
     {
-        if ((element = find_element(GST_ELEMENT_FACTORY_TYPE_PARSER, src_caps, parsed_caps))
+        if ((element = find_element(GST_ELEMENT_FACTORY_TYPE_PARSER, transform->input_caps, parsed_caps))
                 && !append_element(transform->container, element, &first, &last))
            goto out;
         else if (!element)
         {
             gst_caps_unref(parsed_caps);
-            parsed_caps = gst_caps_ref(src_caps);
+            parsed_caps = gst_caps_ref(transform->input_caps);
         }
 
         if (!(element = find_element(GST_ELEMENT_FACTORY_TYPE_DECODER, parsed_caps, sink_caps))
@@ -560,7 +562,7 @@ NTSTATUS wg_transform_create(void *args)
     if (!(event = gst_event_new_stream_start("stream"))
            || !push_event(transform->my_src, event))
         goto out;
-    if (!(event = gst_event_new_caps(src_caps))
+    if (!(event = gst_event_new_caps(transform->input_caps))
            || !push_event(transform->my_src, event))
         goto out;
 
@@ -574,7 +576,6 @@ NTSTATUS wg_transform_create(void *args)
         goto out;
 
     gst_caps_unref(parsed_caps);
-    gst_caps_unref(src_caps);
     gst_caps_unref(sink_caps);
 
     GST_INFO("Created winegstreamer transform %p.", transform);
@@ -590,8 +591,8 @@ out:
         gst_caps_unref(transform->output_caps);
     if (transform->my_src)
         gst_object_unref(transform->my_src);
-    if (src_caps)
-        gst_caps_unref(src_caps);
+    if (transform->input_caps)
+        gst_caps_unref(transform->input_caps);
     if (parsed_caps)
         gst_caps_unref(parsed_caps);
     if (sink_caps)
From: Rémi Bernon <rbernon@codeweavers.com>
---
 dlls/winegstreamer/wg_transform.c | 108 +++++++++++++++++++++---------
 1 file changed, 76 insertions(+), 32 deletions(-)
diff --git a/dlls/winegstreamer/wg_transform.c b/dlls/winegstreamer/wg_transform.c
index ac036fcce59..2537331a118 100644
--- a/dlls/winegstreamer/wg_transform.c
+++ b/dlls/winegstreamer/wg_transform.c
@@ -53,8 +53,8 @@ struct wg_transform
     GstQuery *drain_query;
 
     GstAtomicQueue *input_queue;
-    GstElement *video_flip;
     MFVideoInfo input_info;
+    MFVideoInfo output_info;
 
     GstAtomicQueue *output_queue;
     GstSample *output_sample;
@@ -69,7 +69,8 @@ static struct wg_transform *get_transform(wg_transform_t trans)
     return (struct wg_transform *)(ULONG_PTR)trans;
 }
 
-static void align_video_info_planes(gsize plane_align, GstVideoInfo *info, GstVideoAlignment *align)
+static void align_video_info_planes(MFVideoInfo *video_info, gsize plane_align,
+        GstVideoInfo *info, GstVideoAlignment *align)
 {
     gst_video_alignment_reset(align);
 
@@ -81,6 +82,15 @@ static void align_video_info_planes(gsize plane_align, GstVideoInfo *info, GstVi
     align->stride_align[3] = plane_align;
 
     gst_video_info_align(info, align);
+
+    if (video_info->VideoFlags & MFVideoFlag_BottomUpLinearRep)
+    {
+        for (guint i = 0; i < ARRAY_SIZE(info->offset); ++i)
+        {
+            info->offset[i] += (info->height - 1) * info->stride[i];
+            info->stride[i] = -info->stride[i];
+        }
+    }
 }
 
 typedef struct
@@ -96,16 +106,53 @@ typedef struct
 
 G_DEFINE_TYPE(WgVideoBufferPool, wg_video_buffer_pool, GST_TYPE_VIDEO_BUFFER_POOL);
 
+static void buffer_add_video_meta(GstBuffer *buffer, GstVideoInfo *info)
+{
+    GstVideoMeta *meta;
+
+    if (!(meta = gst_buffer_get_video_meta(buffer)))
+        meta = gst_buffer_add_video_meta(buffer, GST_VIDEO_FRAME_FLAG_NONE,
+                info->finfo->format, info->width, info->height);
+
+    if (!meta)
+        GST_ERROR("Failed to add video meta to buffer %"GST_PTR_FORMAT, buffer);
+    else
+    {
+        memcpy(meta->offset, info->offset, sizeof(info->offset));
+        memcpy(meta->stride, info->stride, sizeof(info->stride));
+    }
+}
+
+static GstFlowReturn wg_video_buffer_pool_alloc_buffer(GstBufferPool *gst_pool, GstBuffer **buffer,
+        GstBufferPoolAcquireParams *params)
+{
+    GstBufferPoolClass *parent_class = GST_BUFFER_POOL_CLASS(wg_video_buffer_pool_parent_class);
+    WgVideoBufferPool *pool = (WgVideoBufferPool *)gst_pool;
+    GstFlowReturn ret;
+
+    GST_LOG("%"GST_PTR_FORMAT", buffer %p, params %p", pool, buffer, params);
+
+    if (!(ret = parent_class->alloc_buffer(gst_pool, buffer, params)))
+    {
+        buffer_add_video_meta(*buffer, &pool->info);
+        GST_INFO("%"GST_PTR_FORMAT" allocated buffer %"GST_PTR_FORMAT, pool, *buffer);
+    }
+
+    return ret;
+}
+
 static void wg_video_buffer_pool_init(WgVideoBufferPool *pool)
 {
 }
 
 static void wg_video_buffer_pool_class_init(WgVideoBufferPoolClass *klass)
 {
+    GstBufferPoolClass *pool_class = GST_BUFFER_POOL_CLASS(klass);
+    pool_class->alloc_buffer = wg_video_buffer_pool_alloc_buffer;
 }
 
 static WgVideoBufferPool *wg_video_buffer_pool_create(GstCaps *caps, gsize plane_align,
-        GstAllocator *allocator, GstVideoAlignment *align)
+        GstAllocator *allocator, MFVideoInfo *video_info, GstVideoAlignment *align)
 {
     WgVideoBufferPool *pool;
     GstStructure *config;
@@ -114,7 +161,7 @@ static WgVideoBufferPool *wg_video_buffer_pool_create(GstCaps *caps, gsize plane
         return NULL;
 
     gst_video_info_from_caps(&pool->info, caps);
-    align_video_info_planes(plane_align, &pool->info, align);
+    align_video_info_planes(video_info, plane_align, &pool->info, align);
 
     if (!(config = gst_buffer_pool_get_config(GST_BUFFER_POOL(pool))))
         GST_ERROR("Failed to get %"GST_PTR_FORMAT" config.", pool);
@@ -196,7 +243,7 @@ static gboolean transform_sink_query_allocation(struct wg_transform *transform,
         return false;
 
     if (!(pool = wg_video_buffer_pool_create(caps, transform->attrs.output_plane_align,
-            transform->allocator, &align)))
+            transform->allocator, &transform->output_info, &align)))
         return false;
 
     if ((params = gst_structure_new("video-meta",
@@ -412,7 +459,6 @@ NTSTATUS wg_transform_create(void *args)
     const gchar *input_mime, *output_mime;
     GstPadTemplate *template = NULL;
     struct wg_transform *transform;
-    MFVideoInfo output_info = {0};
     GstEvent *event;
 
     if (!(transform = calloc(1, sizeof(*transform))))
@@ -442,7 +488,7 @@ NTSTATUS wg_transform_create(void *args)
     if (IsEqualGUID(&params->input_type.major, &MFMediaType_Video))
         transform->input_info = params->input_type.u.video->videoInfo;
     if (IsEqualGUID(&params->output_type.major, &MFMediaType_Video))
-        output_info = params->output_type.u.video->videoInfo;
+        transform->output_info = params->output_type.u.video->videoInfo;
 
     if (!(template = gst_pad_template_new("src", GST_PAD_SRC, GST_PAD_ALWAYS, transform->input_caps)))
         goto out;
@@ -529,15 +575,6 @@ NTSTATUS wg_transform_create(void *args)
     }
     else
     {
-        if (!(element = create_element("videoconvert", "base"))
-                || !append_element(transform->container, element, &first, &last))
-            goto out;
-        if (!(transform->video_flip = create_element("videoflip", "base"))
-                || !append_element(transform->container, transform->video_flip, &first, &last))
-            goto out;
-
-        if ((transform->input_info.VideoFlags ^ output_info.VideoFlags) & MFVideoFlag_BottomUpLinearRep)
-            gst_util_set_object_arg(G_OBJECT(transform->video_flip), "method", "vertical-flip");
         if (!(element = create_element("videoconvert", "base"))
                 || !append_element(transform->container, element, &first, &last))
             goto out;
@@ -635,19 +672,18 @@ NTSTATUS wg_transform_set_output_type(void *args)
 {
     struct wg_transform_set_output_type_params *params = args;
     struct wg_transform *transform = get_transform(params->transform);
-    MFVideoInfo output_info = {0};
     GstCaps *caps, *stripped;
     GstSample *sample;
 
-    if (IsEqualGUID(&params->media_type.major, &MFMediaType_Video))
-        output_info = params->media_type.u.video->videoInfo;
-
     if (!(caps = caps_from_media_type(&params->media_type)))
     {
         GST_ERROR("Failed to convert media type to caps.");
         return STATUS_UNSUCCESSFUL;
     }
 
+    if (IsEqualGUID(&params->media_type.major, &MFMediaType_Video))
+        transform->output_info = params->media_type.u.video->videoInfo;
+
     GST_INFO("transform %p output caps %"GST_PTR_FORMAT, transform, caps);
 
     stripped = caps_strip_fields(caps, transform->attrs.allow_format_change);
@@ -668,16 +704,6 @@ NTSTATUS wg_transform_set_output_type(void *args)
     gst_caps_unref(transform->desired_caps);
     transform->desired_caps = caps;
 
-    if (transform->video_flip)
-    {
-        const char *value;
-
-        if ((transform->input_info.VideoFlags ^ output_info.VideoFlags) & MFVideoFlag_BottomUpLinearRep)
-            value = "vertical-flip";
-        else
-            value = "none";
-        gst_util_set_object_arg(G_OBJECT(transform->video_flip), "method", value);
-    }
     if (!push_event(transform->my_sink, gst_event_new_reconfigure()))
     {
         GST_ERROR("Failed to reconfigure transform %p.", transform);
@@ -711,6 +737,8 @@ NTSTATUS wg_transform_push_data(void *args)
     struct wg_transform_push_data_params *params = args;
     struct wg_transform *transform = get_transform(params->transform);
     struct wg_sample *sample = params->sample;
+    const gchar *input_mime;
+    GstVideoInfo video_info;
     GstBuffer *buffer;
     guint length;
 
@@ -734,6 +762,14 @@ NTSTATUS wg_transform_push_data(void *args)
         GST_INFO("Wrapped %u/%u bytes from sample %p to %"GST_PTR_FORMAT, sample->size, sample->max_size, sample, buffer);
     }
 
+    input_mime = gst_structure_get_name(gst_caps_get_structure(transform->input_caps, 0));
+    if (!strcmp(input_mime, "video/x-raw") && gst_video_info_from_caps(&video_info, transform->input_caps))
+    {
+        GstVideoAlignment align;
+        align_video_info_planes(&transform->input_info, 0, &video_info, &align);
+        buffer_add_video_meta(buffer, &video_info);
+    }
+
     if (sample->flags & WG_SAMPLE_FLAG_HAS_PTS)
         GST_BUFFER_PTS(buffer) = sample->pts * 100;
     if (sample->flags & WG_SAMPLE_FLAG_HAS_DURATION)
@@ -969,13 +1005,21 @@ NTSTATUS wg_transform_read_data(void *args)
     if (!strcmp(output_mime, "video/x-raw"))
     {
         gsize plane_align = transform->attrs.output_plane_align;
+        GstVideoMeta *meta;
 
         if (!gst_video_info_from_caps(&src_video_info, output_caps))
             GST_ERROR("Failed to get video info from %"GST_PTR_FORMAT, output_caps);
         dst_video_info = src_video_info;
 
-        /* set the desired output buffer alignment on the dest video info */
-        align_video_info_planes(plane_align, &dst_video_info, &align);
+        /* set the desired output buffer alignment and stride on the dest video info */
+        align_video_info_planes(&transform->output_info, plane_align, &dst_video_info, &align);
+
+        /* copy the actual output buffer alignment and stride to the src video info */
+        if ((meta = gst_buffer_get_video_meta(output_buffer)))
+        {
+            memcpy(src_video_info.offset, meta->offset, sizeof(meta->offset));
+            memcpy(src_video_info.stride, meta->stride, sizeof(meta->stride));
+        }
     }
 
     if (GST_MINI_OBJECT_FLAG_IS_SET(transform->output_sample, GST_SAMPLE_FLAG_WG_CAPS_CHANGED))
From: Rémi Bernon <rbernon@codeweavers.com>
---
 dlls/mf/tests/transform.c          | 50 ++++++----------
 dlls/winegstreamer/wg_media_type.c | 11 ++++
 dlls/winegstreamer/wg_transform.c  | 93 ++++++++++++++++++++++++++++++
 3 files changed, 122 insertions(+), 32 deletions(-)
diff --git a/dlls/mf/tests/transform.c b/dlls/mf/tests/transform.c
index deec2c7dd92..ada915a527a 100644
--- a/dlls/mf/tests/transform.c
+++ b/dlls/mf/tests/transform.c
@@ -7704,7 +7704,7 @@ static void test_video_processor(void)
        {
            .input_type_desc = nv12_with_aperture, .input_bitmap = L"nv12frame.bmp",
            .output_type_desc = rgb32_no_aperture, .output_bitmap = L"rgb32frame-crop-flip.bmp",
-            .output_sample_desc = &rgb32_crop_sample_desc,
+            .output_sample_desc = &rgb32_crop_sample_desc, .delta = 2, /* Windows returns 0, Wine needs 2 */
        },
        {
            .input_type_desc = rgb32_no_aperture, .input_bitmap = L"rgb32frame-crop-flip.bmp",
@@ -8060,23 +8060,6 @@ static void test_video_processor(void)
         check_mft_set_input_type(transform, test->input_type_desc, S_OK);
         check_mft_get_input_current_type(transform, test->input_type_desc);
 
-        if (i >= 15)
-        {
-            IMFMediaType *media_type;
-            HRESULT hr;
-
-            hr = MFCreateMediaType(&media_type);
-            ok(hr == S_OK, "MFCreateMediaType returned hr %#lx.\n", hr);
-            init_media_type(media_type, test->output_type_desc, -1);
-            hr = IMFTransform_SetOutputType(transform, 0, media_type, 0);
-            todo_wine
-            ok(hr == S_OK, "SetOutputType returned %#lx.\n", hr);
-            IMFMediaType_Release(media_type);
-
-            if (hr != S_OK)
-                goto skip_test;
-        }
-
         check_mft_set_output_type_required(transform, test->output_type_desc);
         check_mft_set_output_type(transform, test->output_type_desc, S_OK);
         check_mft_get_output_current_type(transform, test->output_type_desc);
@@ -8188,7 +8171,6 @@ static void test_video_processor(void)
         ret = IMFSample_Release(output_sample);
         ok(ret == 0, "Release returned %lu\n", ret);
 
-skip_test:
         winetest_pop_context();
 
         hr = IMFTransform_SetInputType(transform, 0, NULL, 0);
@@ -8213,8 +8195,8 @@ skip_test:
     check_mft_set_output_type(transform, rgb32_no_aperture, S_OK);
     check_mft_get_output_current_type(transform, rgb32_no_aperture);
 
-    check_mft_set_input_type_(__LINE__, transform, nv12_with_aperture, S_OK, TRUE);
-    check_mft_get_input_current_type_(__LINE__, transform, nv12_with_aperture, TRUE, FALSE);
+    check_mft_set_input_type(transform, nv12_with_aperture, S_OK);
+    check_mft_get_input_current_type(transform, nv12_with_aperture);
 
     /* output type is the same as before */
     check_mft_get_output_current_type(transform, rgb32_no_aperture);
@@ -8879,7 +8861,13 @@ static void test_h264_with_dxgi_manager(void)
 
     status = 0;
     hr = get_next_h264_output_sample(transform, &input_sample, NULL, output, &data, &data_len);
+    todo_wine_if(hr == MF_E_UNEXPECTED) /* with some llvmpipe versions */
     ok(hr == S_OK, "got %#lx\n", hr);
+    if (hr == MF_E_UNEXPECTED)
+    {
+        IMFSample_Release(input_sample);
+        goto failed;
+    }
     ok(sample != output[0].pSample, "got %p.\n", output[0].pSample);
     sample = output[0].pSample;
 
@@ -9524,7 +9512,7 @@ static void test_video_processor_with_dxgi_manager(void)
     /* check RGB32 output aperture cropping with D3D buffers */
 
     check_mft_set_input_type(transform, nv12_with_aperture, S_OK);
-    check_mft_set_output_type_(__LINE__, transform, rgb32_no_aperture, S_OK, TRUE);
+    check_mft_set_output_type(transform, rgb32_no_aperture, S_OK);
 
     load_resource(L"nv12frame.bmp", &nv12frame_data, &nv12frame_data_len);
     /* skip BMP header and RGB data from the dump */
@@ -9536,7 +9524,7 @@ static void test_video_processor_with_dxgi_manager(void)
     input_sample = create_d3d_sample(allocator, nv12frame_data, nv12frame_data_len);
 
     hr = IMFTransform_ProcessInput(transform, 0, input_sample, 0);
-    todo_wine ok(hr == S_OK, "got %#lx\n", hr);
+    ok(hr == S_OK, "got %#lx\n", hr);
 
     hr = IMFTransform_GetOutputStreamInfo(transform, 0, &info);
     ok(hr == S_OK, "got %#lx\n", hr);
@@ -9545,9 +9533,9 @@ static void test_video_processor_with_dxgi_manager(void)
     status = 0;
     memset(&output, 0, sizeof(output));
     hr = IMFTransform_ProcessOutput(transform, 0, 1, &output, &status);
-    todo_wine ok(hr == S_OK, "got %#lx\n", hr);
+    ok(hr == S_OK, "got %#lx\n", hr);
     ok(!output.pEvents, "got events\n");
-    todo_wine ok(!!output.pSample, "got no sample\n");
+    ok(!!output.pSample, "got no sample\n");
     ok(output.dwStatus == 0, "got %#lx\n", output.dwStatus);
     ok(status == 0, "got %#lx\n", status);
     if (!output.pSample) goto skip_rgb32;
@@ -9582,7 +9570,6 @@ static void test_video_processor_with_dxgi_manager(void)
     IMFSample_Release(output.pSample);
 
     ret = check_mf_sample_collection(output_samples, &output_sample_desc_rgb32_crop, L"rgb32frame-crop.bmp");
-    todo_wine /* FIXME: video process vertically flips the frame... */
     ok(ret <= 5, "got %lu%% diff\n", ret);
 
     IMFCollection_Release(output_samples);
@@ -9592,7 +9579,7 @@ skip_rgb32:
     /* check ABGR32 output with D3D buffers */
 
     check_mft_set_input_type(transform, nv12_with_aperture, S_OK);
-    check_mft_set_output_type_(__LINE__, transform, abgr32_no_aperture, S_OK, TRUE);
+    check_mft_set_output_type(transform, abgr32_no_aperture, S_OK);
 
     load_resource(L"nv12frame.bmp", &nv12frame_data, &nv12frame_data_len);
     /* skip BMP header and RGB data from the dump */
@@ -9604,7 +9591,7 @@ skip_rgb32:
     input_sample = create_d3d_sample(allocator, nv12frame_data, nv12frame_data_len);
 
     hr = IMFTransform_ProcessInput(transform, 0, input_sample, 0);
-    todo_wine ok(hr == S_OK, "got %#lx\n", hr);
+    ok(hr == S_OK, "got %#lx\n", hr);
 
     hr = IMFTransform_GetOutputStreamInfo(transform, 0, &info);
     ok(hr == S_OK, "got %#lx\n", hr);
@@ -9613,9 +9600,9 @@ skip_rgb32:
     status = 0;
     memset(&output, 0, sizeof(output));
     hr = IMFTransform_ProcessOutput(transform, 0, 1, &output, &status);
-    todo_wine ok(hr == S_OK, "got %#lx\n", hr);
+    ok(hr == S_OK, "got %#lx\n", hr);
     ok(!output.pEvents, "got events\n");
-    todo_wine ok(!!output.pSample, "got no sample\n");
+    ok(!!output.pSample, "got no sample\n");
     ok(output.dwStatus == 0, "got %#lx\n", output.dwStatus);
     ok(status == 0, "got %#lx\n", status);
     if (!output.pSample) goto skip_abgr32;
@@ -9631,7 +9618,7 @@ skip_rgb32:
     ID3D11Texture2D_GetDesc(tex2d, &desc);
     ok(desc.Format == DXGI_FORMAT_R8G8B8A8_UNORM, "got %#x.\n", desc.Format);
     ok(!desc.Usage, "got %u.\n", desc.Usage);
-    ok(desc.BindFlags == D3D11_BIND_RENDER_TARGET, "got %#x.\n", desc.BindFlags);
+    todo_wine ok(desc.BindFlags == D3D11_BIND_RENDER_TARGET, "got %#x.\n", desc.BindFlags);
    ok(!desc.CPUAccessFlags, "got %#x.\n", desc.CPUAccessFlags);
    ok(!desc.MiscFlags, "got %#x.\n", desc.MiscFlags);
    ok(desc.MipLevels == 1, "git %u.\n", desc.MipLevels);
@@ -9650,7 +9637,6 @@ skip_rgb32:
     IMFSample_Release(output.pSample);
 
     ret = check_mf_sample_collection(output_samples, &output_sample_desc_abgr32_crop, L"abgr32frame-crop.bmp");
-    todo_wine /* FIXME: video process vertically flips the frame... */
     ok(ret <= 8 /* NVIDIA needs 5, AMD needs 8 */, "got %lu%% diff\n", ret);
 
     IMFCollection_Release(output_samples);
diff --git a/dlls/winegstreamer/wg_media_type.c b/dlls/winegstreamer/wg_media_type.c
index 14fc1a9cdf4..16eb67e1398 100644
--- a/dlls/winegstreamer/wg_media_type.c
+++ b/dlls/winegstreamer/wg_media_type.c
@@ -381,6 +381,11 @@ static GstVideoFormat subtype_to_gst_video_format(const GUID *subtype)
     return GST_VIDEO_FORMAT_ENCODED;
 }
 
+static BOOL is_mf_video_area_empty(const MFVideoArea *area)
+{
+    return !area->OffsetX.value && !area->OffsetY.value && !area->Area.cx && !area->Area.cy;
+}
+
 static GstCaps *caps_from_video_format(const MFVIDEOFORMAT *format, UINT32 format_size)
 {
     GstVideoFormat video_format = subtype_to_gst_video_format(&format->guidFormat);
@@ -410,6 +415,12 @@ static GstCaps *caps_from_video_format(const MFVIDEOFORMAT *format, UINT32 forma
             format->videoInfo.FramesPerSecond.Numerator, format->videoInfo.FramesPerSecond.Denominator, NULL);
 
+    if (!is_mf_video_area_empty(&format->videoInfo.MinimumDisplayAperture))
+    {
+        gst_caps_set_simple(caps, "width", G_TYPE_INT, format->videoInfo.MinimumDisplayAperture.Area.cx, NULL);
+        gst_caps_set_simple(caps, "height", G_TYPE_INT, format->videoInfo.MinimumDisplayAperture.Area.cy, NULL);
+    }
+
     if (video_format == GST_VIDEO_FORMAT_ENCODED)
         init_caps_from_video_subtype(caps, &format->guidFormat, format, format_size);
 
diff --git a/dlls/winegstreamer/wg_transform.c b/dlls/winegstreamer/wg_transform.c
index 2537331a118..feaf7c85dad 100644
--- a/dlls/winegstreamer/wg_transform.c
+++ b/dlls/winegstreamer/wg_transform.c
@@ -69,13 +69,35 @@ static struct wg_transform *get_transform(wg_transform_t trans)
     return (struct wg_transform *)(ULONG_PTR)trans;
 }
 
+static BOOL is_mf_video_area_empty(const MFVideoArea *area)
+{
+    return !area->OffsetX.value && !area->OffsetY.value && !area->Area.cx && !area->Area.cy;
+}
+
 static void align_video_info_planes(MFVideoInfo *video_info, gsize plane_align,
         GstVideoInfo *info, GstVideoAlignment *align)
 {
+    const MFVideoArea *aperture = &video_info->MinimumDisplayAperture;
+
     gst_video_alignment_reset(align);
 
     align->padding_right = ((plane_align + 1) - (info->width & plane_align)) & plane_align;
     align->padding_bottom = ((plane_align + 1) - (info->height & plane_align)) & plane_align;
+    if (!is_mf_video_area_empty(aperture))
+    {
+        align->padding_right = max(align->padding_right, video_info->dwWidth - aperture->OffsetX.value - aperture->Area.cx);
+        align->padding_bottom = max(align->padding_bottom, video_info->dwHeight - aperture->OffsetY.value - aperture->Area.cy);
+        align->padding_top = aperture->OffsetX.value;
+        align->padding_left = aperture->OffsetY.value;
+    }
+
+    if (video_info->VideoFlags & MFVideoFlag_BottomUpLinearRep)
+    {
+        gsize top = align->padding_top;
+        align->padding_top = align->padding_bottom;
+        align->padding_bottom = top;
+    }
+
     align->stride_align[0] = plane_align;
     align->stride_align[1] = plane_align;
     align->stride_align[2] = plane_align;
@@ -93,6 +115,57 @@ static void align_video_info_planes(MFVideoInfo *video_info, gsize plane_align,
     }
 }
 
+static void init_mf_video_info_rect(const MFVideoInfo *info, RECT *rect)
+{
+    if (!is_mf_video_area_empty(&info->MinimumDisplayAperture))
+    {
+        rect->left = info->MinimumDisplayAperture.OffsetX.value;
+        rect->top = info->MinimumDisplayAperture.OffsetY.value;
+        rect->right = rect->left + info->MinimumDisplayAperture.Area.cx;
+        rect->bottom = rect->top + info->MinimumDisplayAperture.Area.cy;
+    }
+    else
+    {
+        rect->left = 0;
+        rect->top = 0;
+        rect->right = info->dwWidth;
+        rect->bottom = info->dwHeight;
+    }
+}
+
+static inline BOOL intersect_rect(RECT *dst, const RECT *src1, const RECT *src2)
+{
+    dst->left = max(src1->left, src2->left);
+    dst->top = max(src1->top, src2->top);
+    dst->right = min(src1->right, src2->right);
+    dst->bottom = min(src1->bottom, src2->bottom);
+    return !IsRectEmpty(dst);
+}
+
+static void update_video_aperture(MFVideoInfo *input_info, MFVideoInfo *output_info)
+{
+    RECT rect, input_rect, output_rect;
+
+    init_mf_video_info_rect(input_info, &input_rect);
+    init_mf_video_info_rect(output_info, &output_rect);
+    intersect_rect(&rect, &input_rect, &output_rect);
+
+    input_info->MinimumDisplayAperture.OffsetX.value = rect.left;
+    input_info->MinimumDisplayAperture.OffsetY.value = rect.top;
+    input_info->MinimumDisplayAperture.Area.cx = rect.right - rect.left;
+    input_info->MinimumDisplayAperture.Area.cy = rect.bottom - rect.top;
+    output_info->MinimumDisplayAperture = input_info->MinimumDisplayAperture;
+}
+
+static void set_video_caps_aperture(GstCaps *caps, MFVideoInfo *video_info)
+{
+    if (!is_mf_video_area_empty(&video_info->MinimumDisplayAperture))
+    {
+        gst_caps_set_simple(caps, "width", G_TYPE_INT, video_info->MinimumDisplayAperture.Area.cx, NULL);
+        gst_caps_set_simple(caps, "height", G_TYPE_INT, video_info->MinimumDisplayAperture.Area.cy, NULL);
+    }
+}
+
 typedef struct
 {
     GstVideoBufferPool parent;
@@ -490,6 +563,15 @@ NTSTATUS wg_transform_create(void *args)
     if (IsEqualGUID(&params->output_type.major, &MFMediaType_Video))
         transform->output_info = params->output_type.u.video->videoInfo;
 
+    /* update the video apertures to make sure GStreamer has a consistent input/output frame size */
+    if (!strcmp(input_mime, "video/x-raw") && !strcmp(output_mime, "video/x-raw"))
+        update_video_aperture(&transform->input_info, &transform->output_info);
+
+    if (IsEqualGUID(&params->input_type.major, &MFMediaType_Video))
+        set_video_caps_aperture(transform->input_caps, &transform->input_info);
+    if (IsEqualGUID(&params->output_type.major, &MFMediaType_Video))
+        set_video_caps_aperture(transform->output_caps, &transform->output_info);
+
     if (!(template = gst_pad_template_new("src", GST_PAD_SRC, GST_PAD_ALWAYS, transform->input_caps)))
         goto out;
     transform->my_src = gst_pad_new_from_template(template, "src");
@@ -672,6 +754,7 @@ NTSTATUS wg_transform_set_output_type(void *args)
 {
     struct wg_transform_set_output_type_params *params = args;
     struct wg_transform *transform = get_transform(params->transform);
+    const char *input_mime, *output_mime;
     GstCaps *caps, *stripped;
     GstSample *sample;
 
@@ -681,9 +764,19 @@ NTSTATUS wg_transform_set_output_type(void *args)
         return STATUS_UNSUCCESSFUL;
     }
 
+    input_mime = gst_structure_get_name(gst_caps_get_structure(transform->input_caps, 0));
+    output_mime = gst_structure_get_name(gst_caps_get_structure(caps, 0));
+
     if (IsEqualGUID(&params->media_type.major, &MFMediaType_Video))
         transform->output_info = params->media_type.u.video->videoInfo;
 
+    /* update the video apertures to make sure GStreamer has a consistent input/output frame size */
+    if (!strcmp(input_mime, "video/x-raw") && !strcmp(output_mime, "video/x-raw"))
+        update_video_aperture(&transform->input_info, &transform->output_info);
+
+    if (IsEqualGUID(&params->media_type.major, &MFMediaType_Video))
+        set_video_caps_aperture(caps, &transform->output_info);
+
     GST_INFO("transform %p output caps %"GST_PTR_FORMAT, transform, caps);
 
     stripped = caps_strip_fields(caps, transform->attrs.allow_format_change);
Elizabeth Figura (@zfigura) commented about dlls/winegstreamer/wg_transform.c:
+static inline BOOL intersect_rect(RECT *dst, const RECT *src1, const RECT *src2)
+{
+    dst->left = max(src1->left, src2->left);
+    dst->top = max(src1->top, src2->top);
+    dst->right = min(src1->right, src2->right);
+    dst->bottom = min(src1->bottom, src2->bottom);
+    return !IsRectEmpty(dst);
+}
+
+static void update_video_aperture(MFVideoInfo *input_info, MFVideoInfo *output_info)
+{
+    RECT rect, input_rect, output_rect;
+
+    init_mf_video_info_rect(input_info, &input_rect);
+    init_mf_video_info_rect(output_info, &output_rect);
+    intersect_rect(&rect, &input_rect, &output_rect);
Where does this come from? Unless I've missed something, there are no tests for mismatched content sizes (only mismatched frame sizes with the same content size).
If that's the case, and I'm not missing a test somewhere, can we please either add some tests, or stop pretending to handle mismatched content size, instead just erroring out with a FIXME?
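For illustration only, the "error out with a FIXME" alternative could take roughly the shape below. This is a hypothetical sketch, not code from the patches; it borrows the get_content_rect() name suggested in the next comment and assumes the helper is made to report failure instead of intersecting:

    static NTSTATUS update_video_aperture(MFVideoInfo *input_info, MFVideoInfo *output_info)
    {
        RECT input_rect, output_rect;

        get_content_rect(input_info, &input_rect);
        get_content_rect(output_info, &output_rect);

        /* Only identical content sizes are handled; anything else is reported
         * rather than silently intersected. */
        if (input_rect.right - input_rect.left != output_rect.right - output_rect.left
                || input_rect.bottom - input_rect.top != output_rect.bottom - output_rect.top)
        {
            GST_FIXME("Mismatched content sizes not supported.");
            return STATUS_NOT_IMPLEMENTED;
        }

        return STATUS_SUCCESS;
    }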
Elizabeth Figura (@zfigura) commented about dlls/winegstreamer/wg_transform.c:
     }
 }
 
+static void init_mf_video_info_rect(const MFVideoInfo *info, RECT *rect)
The name of this function doesn't specify which rect, and "init" feels odd. How about something like "get_content_rect()" or "content_rect_from_video_info()"?
Elizabeth Figura (@zfigura) commented about dlls/winegstreamer/wg_transform.c:
     if (IsEqualGUID(&params->output_type.major, &MFMediaType_Video))
         transform->output_info = params->output_type.u.video->videoInfo;
 
+    /* update the video apertures to make sure GStreamer has a consistent input/output frame size */
+    if (!strcmp(input_mime, "video/x-raw") && !strcmp(output_mime, "video/x-raw"))
+        update_video_aperture(&transform->input_info, &transform->output_info);
+
+    if (IsEqualGUID(&params->input_type.major, &MFMediaType_Video))
+        set_video_caps_aperture(transform->input_caps, &transform->input_info);
+    if (IsEqualGUID(&params->output_type.major, &MFMediaType_Video))
+        set_video_caps_aperture(transform->output_caps, &transform->output_info);
If we are going to change the video aperture (and see my other comment), wouldn't it be simpler to do so *before* calling caps_from_media_type()?
In fact, I'd go as far as to do it on the PE side. From experience I find that simplifying the Unix portion, and reducing fixups from that part, leads to an internal API that's easier to work with.
As stated elsewhere, using stride instead of videoflip adds a lot of code and complexity. It means we are now dealing with two complex interacting components instead of two essentially self-contained ones. This change is, at least, not justified.
The only justification I've seen for this change is that it works around a GStreamer bug. That justification should at least be given in the patch subjects, and ideally the bug should at the very least be *reported* to GStreamer, especially considering that it could easily affect projects other than Wine.
A better, and plausible, justification would be that using video stride offers better performance even without the aforementioned bug. This would make sense, since the decoder (or videoconvert) can potentially write the video "already flipped", instead of going through a separate flipping step. Is this in fact the case?
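For context, the stride-based handling under discussion is the hunk added to align_video_info_planes() above: each plane's offset is moved to the start of its last row and the stride is negated, so whichever element writes the buffer (the decoder or videoconvert) produces the bottom-up layout directly, with no separate flipping pass. A standalone restatement of that logic, using a hypothetical helper name:

    /* Equivalent of the bottom-up hunk in align_video_info_planes(): point each
     * plane at its last row and negate the stride, so rows are written
     * bottom-to-top in the underlying buffer. */
    static void video_info_make_bottom_up(GstVideoInfo *info)
    {
        guint i;

        for (i = 0; i < ARRAY_SIZE(info->offset); ++i)
        {
            info->offset[i] += (info->height - 1) * info->stride[i];
            info->stride[i] = -info->stride[i];
        }
    }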