On the client side, the frame aperture may be included in or omitted from the media types; this is what lets callers keep or drop the frame padding, and the VideoProcessor MFT supports both.
On the GStreamer side, we can only use the unpadded frame size in the caps, because that is how decoders work; the padding instead has to be carried as a property of the input/output video buffers. The frame sizes then need to be made consistent between input and output, with any difference treated as extra padding to be added on one side or the other.
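For context, a minimal sketch (not part of the series) of what carrying the padding on the buffers looks like on the GStreamer side: the caps keep the unpadded frame size, the padded plane layout is derived with GstVideoAlignment and gst_video_info_align(), and that layout is attached to each buffer as a GstVideoMeta. The helper below is illustrative only; plane_align is assumed to be an alignment mask (e.g. 63 for 64-byte alignment), as in the series.

#include <gst/video/video.h>

static gboolean describe_padded_layout(GstCaps *caps, gsize plane_align, GstBuffer *buffer)
{
    GstVideoAlignment align;
    GstVideoInfo info;

    /* the caps carry the unpadded width/height */
    if (!gst_video_info_from_caps(&info, caps))
        return FALSE;

    gst_video_alignment_reset(&align);
    /* round width/height up to the requested alignment, expressed as extra padding */
    align.padding_right = ((plane_align + 1) - (info.width & plane_align)) & plane_align;
    align.padding_bottom = ((plane_align + 1) - (info.height & plane_align)) & plane_align;

    /* recomputes info.stride[], info.offset[] and info.size for the padded frame */
    gst_video_info_align(&info, &align);

    /* advertise the padded layout on the buffer itself; the caps stay unpadded */
    return gst_buffer_add_video_meta_full(buffer, GST_VIDEO_FRAME_FLAG_NONE,
            GST_VIDEO_INFO_FORMAT(&info), GST_VIDEO_INFO_WIDTH(&info),
            GST_VIDEO_INFO_HEIGHT(&info), GST_VIDEO_INFO_N_PLANES(&info),
            info.offset, info.stride) != NULL;
}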
From: Rémi Bernon <rbernon@codeweavers.com>

Instead of computing them in copy_video_buffer.
---
 dlls/winegstreamer/wg_transform.c | 71 +++++++++++++++----------
 1 file changed, 35 insertions(+), 36 deletions(-)
diff --git a/dlls/winegstreamer/wg_transform.c b/dlls/winegstreamer/wg_transform.c index 516b28e82e2..0217a6c8f26 100644 --- a/dlls/winegstreamer/wg_transform.c +++ b/dlls/winegstreamer/wg_transform.c @@ -650,25 +650,14 @@ NTSTATUS wg_transform_push_data(void *args) return STATUS_SUCCESS; }
-static NTSTATUS copy_video_buffer(GstBuffer *buffer, GstCaps *caps, gsize plane_align, - struct wg_sample *sample, gsize *total_size) +static NTSTATUS copy_video_buffer(GstBuffer *buffer, GstVideoInfo *src_video_info, + GstVideoInfo *dst_video_info, struct wg_sample *sample, gsize *total_size) { NTSTATUS status = STATUS_UNSUCCESSFUL; GstVideoFrame src_frame, dst_frame; - GstVideoInfo src_info, dst_info; - GstVideoAlignment align; GstBuffer *dst_buffer;
- if (!gst_video_info_from_caps(&src_info, caps)) - { - GST_ERROR("Failed to get video info from caps."); - return STATUS_UNSUCCESSFUL; - } - - dst_info = src_info; - align_video_info_planes(plane_align, &dst_info, &align); - - if (sample->max_size < dst_info.size) + if (sample->max_size < dst_video_info->size) { GST_ERROR("Output buffer is too small."); return STATUS_BUFFER_TOO_SMALL; @@ -680,14 +669,14 @@ static NTSTATUS copy_video_buffer(GstBuffer *buffer, GstCaps *caps, gsize plane_ GST_ERROR("Failed to wrap wg_sample into GstBuffer"); return STATUS_UNSUCCESSFUL; } - gst_buffer_set_size(dst_buffer, dst_info.size); - *total_size = sample->size = dst_info.size; + gst_buffer_set_size(dst_buffer, dst_video_info->size); + *total_size = sample->size = dst_video_info->size;
- if (!gst_video_frame_map(&src_frame, &src_info, buffer, GST_MAP_READ)) + if (!gst_video_frame_map(&src_frame, src_video_info, buffer, GST_MAP_READ)) GST_ERROR("Failed to map source frame."); else { - if (!gst_video_frame_map(&dst_frame, &dst_info, dst_buffer, GST_MAP_WRITE)) + if (!gst_video_frame_map(&dst_frame, dst_video_info, dst_buffer, GST_MAP_WRITE)) GST_ERROR("Failed to map destination frame."); else { @@ -704,8 +693,7 @@ static NTSTATUS copy_video_buffer(GstBuffer *buffer, GstCaps *caps, gsize plane_ return status; }
-static NTSTATUS copy_buffer(GstBuffer *buffer, GstCaps *caps, struct wg_sample *sample, - gsize *total_size) +static NTSTATUS copy_buffer(GstBuffer *buffer, struct wg_sample *sample, gsize *total_size) { GstMapInfo info;
@@ -730,8 +718,8 @@ static NTSTATUS copy_buffer(GstBuffer *buffer, GstCaps *caps, struct wg_sample * return STATUS_SUCCESS; }
-static NTSTATUS read_transform_output_data(GstBuffer *buffer, GstCaps *caps, gsize plane_align, - struct wg_sample *sample) +static NTSTATUS read_transform_output_data(GstBuffer *buffer, GstVideoInfo *src_video_info, + GstVideoInfo *dst_video_info, struct wg_sample *sample) { gsize total_size; bool needs_copy; @@ -750,10 +738,10 @@ static NTSTATUS read_transform_output_data(GstBuffer *buffer, GstCaps *caps, gsi
if (!needs_copy) status = STATUS_SUCCESS; - else if (stream_type_from_caps(caps) == GST_STREAM_TYPE_VIDEO) - status = copy_video_buffer(buffer, caps, plane_align, sample, &total_size); + else if (src_video_info) + status = copy_video_buffer(buffer, src_video_info, dst_video_info, sample, &total_size); else - status = copy_buffer(buffer, caps, sample, &total_size); + status = copy_buffer(buffer, sample, &total_size);
if (status) { @@ -786,7 +774,7 @@ static NTSTATUS read_transform_output_data(GstBuffer *buffer, GstCaps *caps, gsi
if (needs_copy) { - if (stream_type_from_caps(caps) == GST_STREAM_TYPE_VIDEO) + if (src_video_info) GST_WARNING("Copied %u bytes, sample %p, flags %#x", sample->size, sample, sample->flags); else GST_INFO("Copied %u bytes, sample %p, flags %#x", sample->size, sample, sample->flags); @@ -821,10 +809,12 @@ static bool get_transform_output(struct wg_transform *transform, struct wg_sampl
NTSTATUS wg_transform_read_data(void *args) { + GstVideoInfo src_video_info, dst_video_info, *src_video_info_ptr, *dst_video_info_ptr; struct wg_transform_read_data_params *params = args; struct wg_transform *transform = get_transform(params->transform); struct wg_sample *sample = params->sample; struct wg_format *format = params->format; + GstVideoAlignment align = {0}; GstBuffer *output_buffer; GstCaps *output_caps; bool discard_data; @@ -842,6 +832,22 @@ NTSTATUS wg_transform_read_data(void *args) output_buffer = gst_sample_get_buffer(transform->output_sample); output_caps = gst_sample_get_caps(transform->output_sample);
+ if (stream_type_from_caps(output_caps) != GST_STREAM_TYPE_VIDEO + || !gst_video_info_from_caps(&src_video_info, output_caps)) + dst_video_info_ptr = src_video_info_ptr = NULL; + else + { + gsize plane_align = transform->attrs.output_plane_align; + + dst_video_info = src_video_info; + + /* set the desired output buffer alignment on the dest video info */ + align_video_info_planes(plane_align, &dst_video_info, &align); + + src_video_info_ptr = &src_video_info; + dst_video_info_ptr = &dst_video_info; + } + if (GST_MINI_OBJECT_FLAG_IS_SET(transform->output_sample, GST_SAMPLE_FLAG_WG_CAPS_CHANGED)) { GST_MINI_OBJECT_FLAG_UNSET(transform->output_sample, GST_SAMPLE_FLAG_WG_CAPS_CHANGED); @@ -850,17 +856,10 @@ NTSTATUS wg_transform_read_data(void *args)
if (format) { - gsize plane_align = transform->attrs.output_plane_align; - GstVideoAlignment align; - GstVideoInfo info; - wg_format_from_caps(format, output_caps);
- if (format->major_type == WG_MAJOR_TYPE_VIDEO - && gst_video_info_from_caps(&info, output_caps)) + if (format->major_type == WG_MAJOR_TYPE_VIDEO) { - align_video_info_planes(plane_align, &info, &align); - GST_INFO("Returning video alignment left %u, top %u, right %u, bottom %u.", align.padding_left, align.padding_top, align.padding_right, align.padding_bottom);
@@ -881,8 +880,8 @@ NTSTATUS wg_transform_read_data(void *args) return STATUS_SUCCESS; }
- if ((status = read_transform_output_data(output_buffer, output_caps, - transform->attrs.output_plane_align, sample))) + if ((status = read_transform_output_data(output_buffer, src_video_info_ptr, + dst_video_info_ptr, sample))) { wg_allocator_release_sample(transform->allocator, sample, false); return status;
From: Rémi Bernon <rbernon@codeweavers.com>

---
 dlls/winegstreamer/wg_transform.c | 48 +++++++++++++++++--------------
 1 file changed, 26 insertions(+), 22 deletions(-)
diff --git a/dlls/winegstreamer/wg_transform.c b/dlls/winegstreamer/wg_transform.c index 0217a6c8f26..3339d60b217 100644 --- a/dlls/winegstreamer/wg_transform.c +++ b/dlls/winegstreamer/wg_transform.c @@ -38,6 +38,7 @@ #include "mferror.h"
#include "unix_private.h" +#include "wine/debug.h"
#define GST_SAMPLE_FLAG_WG_CAPS_CHANGED (GST_MINI_OBJECT_FLAG_LAST << 0)
@@ -543,16 +544,16 @@ NTSTATUS wg_transform_set_output_format(void *args) { struct wg_transform_set_output_format_params *params = args; struct wg_transform *transform = get_transform(params->transform); - const struct wg_format *format = params->format; + struct wg_format output_format = *params->format; GstSample *sample; GstCaps *caps;
- if (!(caps = transform_format_to_caps(transform, format))) + if (!(caps = transform_format_to_caps(transform, &output_format))) { - GST_ERROR("Failed to convert format %p to caps.", format); + GST_ERROR("Failed to convert format to caps."); return STATUS_UNSUCCESSFUL; } - transform->output_format = *format; + transform->output_format = output_format;
GST_INFO("transform %p output caps %"GST_PTR_FORMAT, transform, caps);
@@ -574,7 +575,7 @@ NTSTATUS wg_transform_set_output_format(void *args) if (transform->video_flip) { const char *value; - if (transform->input_is_flipped != wg_format_video_is_flipped(format)) + if (transform->input_is_flipped != wg_format_video_is_flipped(&output_format)) value = "vertical-flip"; else value = "none"; @@ -850,30 +851,33 @@ NTSTATUS wg_transform_read_data(void *args)
if (GST_MINI_OBJECT_FLAG_IS_SET(transform->output_sample, GST_SAMPLE_FLAG_WG_CAPS_CHANGED)) { + struct wg_format output_format; + GST_MINI_OBJECT_FLAG_UNSET(transform->output_sample, GST_SAMPLE_FLAG_WG_CAPS_CHANGED);
GST_INFO("transform %p output caps %"GST_PTR_FORMAT, transform, output_caps);
- if (format) + wg_format_from_caps(&output_format, output_caps); + if (output_format.major_type == WG_MAJOR_TYPE_VIDEO) { - wg_format_from_caps(format, output_caps); - - if (format->major_type == WG_MAJOR_TYPE_VIDEO) - { - GST_INFO("Returning video alignment left %u, top %u, right %u, bottom %u.", align.padding_left, - align.padding_top, align.padding_right, align.padding_bottom); - - format->u.video.padding.left = align.padding_left; - format->u.video.width += format->u.video.padding.left; - format->u.video.padding.right = align.padding_right; - format->u.video.width += format->u.video.padding.right; - format->u.video.padding.top = align.padding_top; - format->u.video.height += format->u.video.padding.top; - format->u.video.padding.bottom = align.padding_bottom; - format->u.video.height += format->u.video.padding.bottom; - } + output_format.u.video.padding.left = align.padding_left; + output_format.u.video.width += output_format.u.video.padding.left; + output_format.u.video.padding.right = align.padding_right; + output_format.u.video.width += output_format.u.video.padding.right; + output_format.u.video.padding.top = align.padding_top; + output_format.u.video.height += output_format.u.video.padding.top; + output_format.u.video.padding.bottom = align.padding_bottom; + output_format.u.video.height += output_format.u.video.padding.bottom; + GST_INFO("new video padding rect %s", wine_dbgstr_rect(&output_format.u.video.padding)); + + if (transform->output_format.u.video.height < 0) + output_format.u.video.height *= -1; }
+ if (format) + *format = output_format; + transform->output_format = output_format; + params->result = MF_E_TRANSFORM_STREAM_CHANGE; GST_INFO("Format changed detected, returning no output"); wg_allocator_release_sample(transform->allocator, sample, false);
From: Rémi Bernon <rbernon@codeweavers.com>

---
 dlls/winegstreamer/wg_transform.c | 75 +++++++++++++++++++++----------
 1 file changed, 51 insertions(+), 24 deletions(-)
diff --git a/dlls/winegstreamer/wg_transform.c b/dlls/winegstreamer/wg_transform.c index 3339d60b217..54198085b5b 100644 --- a/dlls/winegstreamer/wg_transform.c +++ b/dlls/winegstreamer/wg_transform.c @@ -83,6 +83,51 @@ static void align_video_info_planes(gsize plane_align, GstVideoInfo *info, GstVi gst_video_info_align(info, align); }
+typedef struct +{ + GstVideoBufferPool parent; +} WgVideoBufferPool; + +typedef struct +{ + GstVideoBufferPoolClass parent_class; +} WgVideoBufferPoolClass; + +G_DEFINE_TYPE(WgVideoBufferPool, wg_video_buffer_pool, GST_TYPE_VIDEO_BUFFER_POOL); + +static void wg_video_buffer_pool_init(WgVideoBufferPool * pool) {} +static void wg_video_buffer_pool_class_init(WgVideoBufferPoolClass *klass) {} + +static WgVideoBufferPool *wg_video_buffer_pool_create(GstCaps *caps, gsize plane_align, + GstAllocator *allocator, GstVideoInfo *info, GstVideoAlignment *align) +{ + WgVideoBufferPool *pool; + GstStructure *config; + + if (!(pool = g_object_new(wg_video_buffer_pool_get_type(), NULL))) + return NULL; + + gst_video_info_from_caps(info, caps); + align_video_info_planes(plane_align, info, align); + + if (!(config = gst_buffer_pool_get_config(GST_BUFFER_POOL(pool)))) + GST_ERROR("Failed to get %"GST_PTR_FORMAT" config.", pool); + else + { + gst_buffer_pool_config_add_option(config, GST_BUFFER_POOL_OPTION_VIDEO_META); + gst_buffer_pool_config_add_option(config, GST_BUFFER_POOL_OPTION_VIDEO_ALIGNMENT); + gst_buffer_pool_config_set_video_alignment(config, align); + + gst_buffer_pool_config_set_params(config, caps, info->size, 0, 0); + gst_buffer_pool_config_set_allocator(config, allocator, NULL); + if (!gst_buffer_pool_set_config(GST_BUFFER_POOL(pool), config)) + GST_ERROR("Failed to set %"GST_PTR_FORMAT" config.", pool); + } + + GST_INFO("Created %"GST_PTR_FORMAT, pool); + return pool; +} + static GstFlowReturn transform_sink_chain_cb(GstPad *pad, GstObject *parent, GstBuffer *buffer) { struct wg_transform *transform = gst_pad_get_element_private(pad); @@ -129,11 +174,10 @@ static gboolean transform_src_query_cb(GstPad *pad, GstObject *parent, GstQuery
static gboolean transform_sink_query_allocation(struct wg_transform *transform, GstQuery *query) { - gsize plane_align = transform->attrs.output_plane_align; - GstStructure *config, *params; + WgVideoBufferPool *pool; GstVideoAlignment align; + GstStructure *params; gboolean needs_pool; - GstBufferPool *pool; GstVideoInfo info; GstCaps *caps;
@@ -143,12 +187,10 @@ static gboolean transform_sink_query_allocation(struct wg_transform *transform, if (stream_type_from_caps(caps) != GST_STREAM_TYPE_VIDEO || !needs_pool) return false;
- if (!gst_video_info_from_caps(&info, caps) - || !(pool = gst_video_buffer_pool_new())) + if (!(pool = wg_video_buffer_pool_create(caps, transform->attrs.output_plane_align, + transform->allocator, &info, &align))) return false;
- align_video_info_planes(plane_align, &info, &align); - if ((params = gst_structure_new("video-meta", "padding-top", G_TYPE_UINT, align.padding_top, "padding-bottom", G_TYPE_UINT, align.padding_bottom, @@ -160,26 +202,11 @@ static gboolean transform_sink_query_allocation(struct wg_transform *transform, gst_structure_free(params); }
- if (!(config = gst_buffer_pool_get_config(pool))) - GST_ERROR("Failed to get %"GST_PTR_FORMAT" config.", pool); - else - { - gst_buffer_pool_config_add_option(config, GST_BUFFER_POOL_OPTION_VIDEO_META); - gst_buffer_pool_config_add_option(config, GST_BUFFER_POOL_OPTION_VIDEO_ALIGNMENT); - gst_buffer_pool_config_set_video_alignment(config, &align); - - gst_buffer_pool_config_set_params(config, caps, - info.size, 0, 0); - gst_buffer_pool_config_set_allocator(config, transform->allocator, NULL); - if (!gst_buffer_pool_set_config(pool, config)) - GST_ERROR("Failed to set %"GST_PTR_FORMAT" config.", pool); - } - /* Prevent pool reconfiguration, we don't want another alignment. */ - if (!gst_buffer_pool_set_active(pool, true)) + if (!gst_buffer_pool_set_active(GST_BUFFER_POOL(pool), true)) GST_ERROR("%"GST_PTR_FORMAT" failed to activate.", pool);
- gst_query_add_allocation_pool(query, pool, info.size, 0, 0); + gst_query_add_allocation_pool(query, GST_BUFFER_POOL(pool), info.size, 0, 0); gst_query_add_allocation_param(query, transform->allocator, NULL);
GST_INFO("Proposing %"GST_PTR_FORMAT", buffer size %#zx, %"GST_PTR_FORMAT", for %"GST_PTR_FORMAT,
From: Rémi Bernon <rbernon@codeweavers.com>

---
 dlls/winegstreamer/wg_transform.c | 142 ++++++++++++++++++++----------
 1 file changed, 94 insertions(+), 48 deletions(-)
diff --git a/dlls/winegstreamer/wg_transform.c b/dlls/winegstreamer/wg_transform.c index 54198085b5b..e37a97113bc 100644 --- a/dlls/winegstreamer/wg_transform.c +++ b/dlls/winegstreamer/wg_transform.c @@ -54,14 +54,13 @@ struct wg_transform
GstAtomicQueue *input_queue;
- bool input_is_flipped; - GstElement *video_flip; - + struct wg_format input_format; struct wg_format output_format; GstAtomicQueue *output_queue; GstSample *output_sample; bool output_caps_changed; GstCaps *output_caps; + GstCaps *input_caps; };
static struct wg_transform *get_transform(wg_transform_t trans) @@ -69,7 +68,8 @@ static struct wg_transform *get_transform(wg_transform_t trans) return (struct wg_transform *)(ULONG_PTR)trans; }
-static void align_video_info_planes(gsize plane_align, GstVideoInfo *info, GstVideoAlignment *align) +static void align_video_info_planes(struct wg_format *format, gsize plane_align, + GstVideoInfo *info, GstVideoAlignment *align) { gst_video_alignment_reset(align);
@@ -81,11 +81,21 @@ static void align_video_info_planes(gsize plane_align, GstVideoInfo *info, GstVi align->stride_align[3] = plane_align;
gst_video_info_align(info, align); + + if (format->u.video.height < 0) + { + for (guint i = 0; i < ARRAY_SIZE(info->offset); ++i) + { + info->offset[i] += (info->height - 1) * info->stride[i]; + info->stride[i] = -info->stride[i]; + } + } }
typedef struct { GstVideoBufferPool parent; + GstVideoInfo info; } WgVideoBufferPool;
typedef struct @@ -95,11 +105,51 @@ typedef struct
G_DEFINE_TYPE(WgVideoBufferPool, wg_video_buffer_pool, GST_TYPE_VIDEO_BUFFER_POOL);
+static void buffer_add_video_meta(GstBuffer *buffer, GstVideoInfo *info) +{ + GstVideoMeta *meta; + + if (!(meta = gst_buffer_get_video_meta(buffer))) + meta = gst_buffer_add_video_meta(buffer, GST_VIDEO_FRAME_FLAG_NONE, + info->finfo->format, info->width, info->height); + + if (!meta) + GST_ERROR("Failed to add video meta to buffer %"GST_PTR_FORMAT, buffer); + else + { + memcpy(meta->offset, info->offset, sizeof(info->offset)); + memcpy(meta->stride, info->stride, sizeof(info->stride)); + } +} + +static GstFlowReturn wg_video_buffer_pool_alloc_buffer(GstBufferPool *gst_pool, GstBuffer **buffer, + GstBufferPoolAcquireParams *params) +{ + GstBufferPoolClass *parent_class = GST_BUFFER_POOL_CLASS(wg_video_buffer_pool_parent_class); + WgVideoBufferPool *pool = (WgVideoBufferPool *)gst_pool; + GstFlowReturn ret; + + GST_LOG("%"GST_PTR_FORMAT", buffer %p, params %p", pool, buffer, params); + + if (!(ret = parent_class->alloc_buffer(gst_pool, buffer, params))) + { + buffer_add_video_meta(*buffer, &pool->info); + GST_INFO("%"GST_PTR_FORMAT" allocated buffer %"GST_PTR_FORMAT, pool, *buffer); + } + + return ret; +} + static void wg_video_buffer_pool_init(WgVideoBufferPool * pool) {} -static void wg_video_buffer_pool_class_init(WgVideoBufferPoolClass *klass) {} + +static void wg_video_buffer_pool_class_init(WgVideoBufferPoolClass *klass) +{ + GstBufferPoolClass *pool_class = GST_BUFFER_POOL_CLASS(klass); + pool_class->alloc_buffer = wg_video_buffer_pool_alloc_buffer; +}
static WgVideoBufferPool *wg_video_buffer_pool_create(GstCaps *caps, gsize plane_align, - GstAllocator *allocator, GstVideoInfo *info, GstVideoAlignment *align) + GstAllocator *allocator, struct wg_format *format, GstVideoAlignment *align) { WgVideoBufferPool *pool; GstStructure *config; @@ -107,8 +157,10 @@ static WgVideoBufferPool *wg_video_buffer_pool_create(GstCaps *caps, gsize plane if (!(pool = g_object_new(wg_video_buffer_pool_get_type(), NULL))) return NULL;
- gst_video_info_from_caps(info, caps); - align_video_info_planes(plane_align, info, align); + if (!gst_video_info_from_caps(&pool->info, caps)) + GST_ERROR("Failed to create video info from caps %" GST_PTR_FORMAT, caps); + else + align_video_info_planes(format, plane_align, &pool->info, align);
if (!(config = gst_buffer_pool_get_config(GST_BUFFER_POOL(pool)))) GST_ERROR("Failed to get %"GST_PTR_FORMAT" config.", pool); @@ -118,7 +170,7 @@ static WgVideoBufferPool *wg_video_buffer_pool_create(GstCaps *caps, gsize plane gst_buffer_pool_config_add_option(config, GST_BUFFER_POOL_OPTION_VIDEO_ALIGNMENT); gst_buffer_pool_config_set_video_alignment(config, align);
- gst_buffer_pool_config_set_params(config, caps, info->size, 0, 0); + gst_buffer_pool_config_set_params(config, caps, pool->info.size, 0, 0); gst_buffer_pool_config_set_allocator(config, allocator, NULL); if (!gst_buffer_pool_set_config(GST_BUFFER_POOL(pool), config)) GST_ERROR("Failed to set %"GST_PTR_FORMAT" config.", pool); @@ -178,7 +230,6 @@ static gboolean transform_sink_query_allocation(struct wg_transform *transform, GstVideoAlignment align; GstStructure *params; gboolean needs_pool; - GstVideoInfo info; GstCaps *caps;
GST_LOG("transform %p, %"GST_PTR_FORMAT, transform, query); @@ -188,7 +239,7 @@ static gboolean transform_sink_query_allocation(struct wg_transform *transform, return false;
if (!(pool = wg_video_buffer_pool_create(caps, transform->attrs.output_plane_align, - transform->allocator, &info, &align))) + transform->allocator, &transform->output_format, &align))) return false;
if ((params = gst_structure_new("video-meta", @@ -206,11 +257,11 @@ static gboolean transform_sink_query_allocation(struct wg_transform *transform, if (!gst_buffer_pool_set_active(GST_BUFFER_POOL(pool), true)) GST_ERROR("%"GST_PTR_FORMAT" failed to activate.", pool);
- gst_query_add_allocation_pool(query, GST_BUFFER_POOL(pool), info.size, 0, 0); + gst_query_add_allocation_pool(query, GST_BUFFER_POOL(pool), pool->info.size, 0, 0); gst_query_add_allocation_param(query, transform->allocator, NULL);
GST_INFO("Proposing %"GST_PTR_FORMAT", buffer size %#zx, %"GST_PTR_FORMAT", for %"GST_PTR_FORMAT, - pool, info.size, transform->allocator, query); + pool, pool->info.size, transform->allocator, query);
g_object_unref(pool); return true; @@ -349,27 +400,23 @@ NTSTATUS wg_transform_destroy(void *args) g_object_unref(transform->my_src); gst_query_unref(transform->drain_query); gst_caps_unref(transform->output_caps); + gst_caps_unref(transform->input_caps); gst_atomic_queue_unref(transform->output_queue); free(transform);
return STATUS_SUCCESS; }
-static bool wg_format_video_is_flipped(const struct wg_format *format) -{ - return format->major_type == WG_MAJOR_TYPE_VIDEO && (format->u.video.height < 0); -} - NTSTATUS wg_transform_create(void *args) { struct wg_transform_create_params *params = args; struct wg_format output_format = *params->output_format; struct wg_format input_format = *params->input_format; GstElement *first = NULL, *last = NULL, *element; - GstCaps *raw_caps = NULL, *src_caps = NULL; NTSTATUS status = STATUS_UNSUCCESSFUL; GstPadTemplate *template = NULL; struct wg_transform *transform; + GstCaps *raw_caps = NULL; const gchar *media_type; GstEvent *event;
@@ -386,18 +433,19 @@ NTSTATUS wg_transform_create(void *args) if (!(transform->allocator = wg_allocator_create())) goto out; transform->attrs = *params->attrs; + transform->input_format = input_format; transform->output_format = output_format;
- if (!(src_caps = transform_format_to_caps(transform, &input_format))) + if (!(transform->input_caps = transform_format_to_caps(transform, &input_format))) goto out; - if (!(template = gst_pad_template_new("src", GST_PAD_SRC, GST_PAD_ALWAYS, src_caps))) + if (!(template = gst_pad_template_new("src", GST_PAD_SRC, GST_PAD_ALWAYS, transform->input_caps))) goto out; transform->my_src = gst_pad_new_from_template(template, "src"); g_object_unref(template); if (!transform->my_src) goto out;
- GST_INFO("transform %p input caps %"GST_PTR_FORMAT, transform, src_caps); + GST_INFO("transform %p input caps %"GST_PTR_FORMAT, transform, transform->input_caps);
gst_pad_set_element_private(transform->my_src, transform); gst_pad_set_query_function(transform->my_src, transform_src_query_cb); @@ -436,7 +484,7 @@ NTSTATUS wg_transform_create(void *args) case WG_MAJOR_TYPE_VIDEO_INDEO: case WG_MAJOR_TYPE_VIDEO_WMV: case WG_MAJOR_TYPE_VIDEO_MPEG1: - if (!(element = find_element(GST_ELEMENT_FACTORY_TYPE_DECODER, src_caps, raw_caps)) + if (!(element = find_element(GST_ELEMENT_FACTORY_TYPE_DECODER, transform->input_caps, raw_caps)) || !append_element(transform->container, element, &first, &last)) { gst_caps_unref(raw_caps); @@ -476,15 +524,6 @@ NTSTATUS wg_transform_create(void *args) break;
case WG_MAJOR_TYPE_VIDEO: - if (!(element = create_element("videoconvert", "base")) - || !append_element(transform->container, element, &first, &last)) - goto out; - if (!(transform->video_flip = create_element("videoflip", "base")) - || !append_element(transform->container, transform->video_flip, &first, &last)) - goto out; - transform->input_is_flipped = wg_format_video_is_flipped(&input_format); - if (transform->input_is_flipped != wg_format_video_is_flipped(&output_format)) - gst_util_set_object_arg(G_OBJECT(transform->video_flip), "method", "vertical-flip"); if (!(element = create_element("videoconvert", "base")) || !append_element(transform->container, element, &first, &last)) goto out; @@ -521,7 +560,7 @@ NTSTATUS wg_transform_create(void *args) if (!(event = gst_event_new_stream_start("stream")) || !push_event(transform->my_src, event)) goto out; - if (!(event = gst_event_new_caps(src_caps)) + if (!(event = gst_event_new_caps(transform->input_caps)) || !push_event(transform->my_src, event)) goto out;
@@ -534,8 +573,6 @@ NTSTATUS wg_transform_create(void *args) || !push_event(transform->my_src, event)) goto out;
- gst_caps_unref(src_caps); - GST_INFO("Created winegstreamer transform %p.", transform); params->transform = (wg_transform_t)(ULONG_PTR)transform; return STATUS_SUCCESS; @@ -547,8 +584,8 @@ out: gst_caps_unref(transform->output_caps); if (transform->my_src) gst_object_unref(transform->my_src); - if (src_caps) - gst_caps_unref(src_caps); + if (transform->input_caps) + gst_caps_unref(transform->input_caps); if (transform->allocator) wg_allocator_destroy(transform->allocator); if (transform->drain_query) @@ -599,15 +636,6 @@ NTSTATUS wg_transform_set_output_format(void *args) gst_caps_unref(transform->output_caps); transform->output_caps = caps;
- if (transform->video_flip) - { - const char *value; - if (transform->input_is_flipped != wg_format_video_is_flipped(&output_format)) - value = "vertical-flip"; - else - value = "none"; - gst_util_set_object_arg(G_OBJECT(transform->video_flip), "method", value); - } if (!push_event(transform->my_sink, gst_event_new_reconfigure())) { GST_ERROR("Failed to reconfigure transform %p.", transform); @@ -641,6 +669,7 @@ NTSTATUS wg_transform_push_data(void *args) struct wg_transform_push_data_params *params = args; struct wg_transform *transform = get_transform(params->transform); struct wg_sample *sample = params->sample; + GstVideoInfo video_info; GstBuffer *buffer; guint length;
@@ -664,6 +693,14 @@ NTSTATUS wg_transform_push_data(void *args) GST_INFO("Wrapped %u/%u bytes from sample %p to %"GST_PTR_FORMAT, sample->size, sample->max_size, sample, buffer); }
+ if (stream_type_from_caps(transform->input_caps) == GST_STREAM_TYPE_VIDEO + && gst_video_info_from_caps(&video_info, transform->input_caps)) + { + GstVideoAlignment align; + align_video_info_planes(&transform->input_format, 0, &video_info, &align); + buffer_add_video_meta(buffer, &video_info); + } + if (sample->flags & WG_SAMPLE_FLAG_HAS_PTS) GST_BUFFER_PTS(buffer) = sample->pts * 100; if (sample->flags & WG_SAMPLE_FLAG_HAS_DURATION) @@ -866,11 +903,20 @@ NTSTATUS wg_transform_read_data(void *args) else { gsize plane_align = transform->attrs.output_plane_align; + GstVideoMeta *meta;
dst_video_info = src_video_info;
- /* set the desired output buffer alignment on the dest video info */ - align_video_info_planes(plane_align, &dst_video_info, &align); + /* set the desired output buffer alignment and stride on the dest video info */ + align_video_info_planes(&transform->output_format, plane_align, + &dst_video_info, &align); + + /* copy the actual output buffer alignment and stride to the src video info */ + if ((meta = gst_buffer_get_video_meta(output_buffer))) + { + memcpy(src_video_info.offset, meta->offset, sizeof(meta->offset)); + memcpy(src_video_info.stride, meta->stride, sizeof(meta->stride)); + }
src_video_info_ptr = &src_video_info; dst_video_info_ptr = &dst_video_info;
From: Rémi Bernon <rbernon@codeweavers.com>

---
 dlls/mf/tests/transform.c         |  4 +-
 dlls/winegstreamer/mfplat.c       | 33 +++++++++--------
 dlls/winegstreamer/wg_transform.c | 61 +++++++++++++++++++++++++++----
 3 files changed, 74 insertions(+), 24 deletions(-)
diff --git a/dlls/mf/tests/transform.c b/dlls/mf/tests/transform.c index 468239b0fd4..0129f8a112b 100644 --- a/dlls/mf/tests/transform.c +++ b/dlls/mf/tests/transform.c @@ -7947,8 +7947,8 @@ static void test_video_processor(void) check_mft_set_output_type(transform, rgb32_no_aperture, S_OK); check_mft_get_output_current_type(transform, rgb32_no_aperture);
- check_mft_set_input_type_(__LINE__, transform, nv12_with_aperture, TRUE); - check_mft_get_input_current_type_(__LINE__, transform, nv12_with_aperture, TRUE, FALSE); + check_mft_set_input_type(transform, nv12_with_aperture); + check_mft_get_input_current_type(transform, nv12_with_aperture);
/* output type is the same as before */ check_mft_get_output_current_type(transform, rgb32_no_aperture); diff --git a/dlls/winegstreamer/mfplat.c b/dlls/winegstreamer/mfplat.c index e6d9fb9fd2c..c27a2646937 100644 --- a/dlls/winegstreamer/mfplat.c +++ b/dlls/winegstreamer/mfplat.c @@ -519,6 +519,22 @@ static IMFMediaType *mf_media_type_from_wg_format_video(const struct wg_format * if (FAILED(MFCreateMediaType(&type))) return NULL;
+ if (format->u.video.padding.left || format->u.video.padding.right + || format->u.video.padding.top || format->u.video.padding.bottom) + { + MFVideoArea aperture = + { + .OffsetX = {.value = format->u.video.padding.left}, + .OffsetY = {.value = format->u.video.padding.top}, + .Area.cx = width, .Area.cy = height, + }; + width += format->u.video.padding.left + format->u.video.padding.right; + height += format->u.video.padding.top + format->u.video.padding.bottom; + + IMFMediaType_SetBlob(type, &MF_MT_MINIMUM_DISPLAY_APERTURE, + (BYTE *)&aperture, sizeof(aperture)); + } + IMFMediaType_SetGUID(type, &MF_MT_MAJOR_TYPE, &MFMediaType_Video); IMFMediaType_SetGUID(type, &MF_MT_SUBTYPE, video_formats[i].subtype); IMFMediaType_SetUINT64(type, &MF_MT_FRAME_SIZE, make_uint64(width, height)); @@ -532,21 +548,6 @@ static IMFMediaType *mf_media_type_from_wg_format_video(const struct wg_format * stride = -stride; IMFMediaType_SetUINT32(type, &MF_MT_DEFAULT_STRIDE, stride);
- if (format->u.video.padding.left || format->u.video.padding.right - || format->u.video.padding.top || format->u.video.padding.bottom) - { - MFVideoArea aperture = - { - .OffsetX = {.value = format->u.video.padding.left}, - .OffsetY = {.value = format->u.video.padding.top}, - .Area.cx = width - format->u.video.padding.right - format->u.video.padding.left, - .Area.cy = height - format->u.video.padding.bottom - format->u.video.padding.top, - }; - - IMFMediaType_SetBlob(type, &MF_MT_MINIMUM_DISPLAY_APERTURE, - (BYTE *)&aperture, sizeof(aperture)); - } - return type; } } @@ -706,6 +707,8 @@ static void mf_media_type_to_wg_format_video(IMFMediaType *type, const GUID *sub format->u.video.padding.top = aperture.OffsetY.value; format->u.video.padding.right = format->u.video.width - aperture.Area.cx - aperture.OffsetX.value; format->u.video.padding.bottom = format->u.video.height - aperture.Area.cy - aperture.OffsetY.value; + format->u.video.width -= format->u.video.padding.left + format->u.video.padding.right; + format->u.video.height -= format->u.video.padding.top + format->u.video.padding.bottom; }
if (SUCCEEDED(IMFMediaType_GetUINT64(type, &MF_MT_FRAME_RATE, &frame_rate)) && (UINT32)frame_rate) diff --git a/dlls/winegstreamer/wg_transform.c b/dlls/winegstreamer/wg_transform.c index e37a97113bc..45a9597edca 100644 --- a/dlls/winegstreamer/wg_transform.c +++ b/dlls/winegstreamer/wg_transform.c @@ -75,6 +75,18 @@ static void align_video_info_planes(struct wg_format *format, gsize plane_align,
align->padding_right = ((plane_align + 1) - (info->width & plane_align)) & plane_align; align->padding_bottom = ((plane_align + 1) - (info->height & plane_align)) & plane_align; + align->padding_right = max(align->padding_right, format->u.video.padding.right); + align->padding_bottom = max(align->padding_bottom, format->u.video.padding.bottom); + align->padding_top = format->u.video.padding.top; + align->padding_left = format->u.video.padding.left; + + if (format->u.video.height < 0) + { + gsize top = align->padding_top; + align->padding_top = align->padding_bottom; + align->padding_bottom = top; + } + align->stride_align[0] = plane_align; align->stride_align[1] = plane_align; align->stride_align[2] = plane_align; @@ -92,6 +104,36 @@ static void align_video_info_planes(struct wg_format *format, gsize plane_align, } }
+static void update_format_width_padding(struct wg_format *max, struct wg_format *min) +{ + max->u.video.padding.right += max->u.video.width - min->u.video.width; + max->u.video.width = min->u.video.width; +} + +static void update_format_height_padding(struct wg_format *max, struct wg_format *min) +{ + max->u.video.padding.bottom += abs(max->u.video.height) - abs(min->u.video.height); + max->u.video.height = (max->u.video.height < 0 ? -1 : 1) * abs(min->u.video.height); +} + +static void update_format_padding(struct wg_format *input_format, struct wg_format *output_format) +{ + if (input_format->major_type != output_format->major_type) + return; + if (input_format->major_type != WG_MAJOR_TYPE_VIDEO) + return; + + if (input_format->u.video.width > output_format->u.video.width) + update_format_width_padding(input_format, output_format); + else + update_format_width_padding(output_format, input_format); + + if (abs(input_format->u.video.height) > abs(output_format->u.video.height)) + update_format_height_padding(input_format, output_format); + else + update_format_height_padding(output_format, input_format); +} + typedef struct { GstVideoBufferPool parent; @@ -433,6 +475,9 @@ NTSTATUS wg_transform_create(void *args) if (!(transform->allocator = wg_allocator_create())) goto out; transform->attrs = *params->attrs; + + /* GStreamer cannot include the buffer padding in the frame sizes but MF does, make sure the formats have the same */ + update_format_padding(&input_format, &output_format); transform->input_format = input_format; transform->output_format = output_format;
@@ -612,6 +657,13 @@ NTSTATUS wg_transform_set_output_format(void *args) GstSample *sample; GstCaps *caps;
+ if (output_format.major_type == WG_MAJOR_TYPE_VIDEO) + { + /* GStreamer cannot include the buffer padding in the frame sizes but MF does, make sure the formats have the same */ + update_format_width_padding(&output_format, &transform->output_format); + update_format_height_padding(&output_format, &transform->output_format); + } + if (!(caps = transform_format_to_caps(transform, &output_format))) { GST_ERROR("Failed to convert format to caps."); @@ -933,18 +985,13 @@ NTSTATUS wg_transform_read_data(void *args) wg_format_from_caps(&output_format, output_caps); if (output_format.major_type == WG_MAJOR_TYPE_VIDEO) { + if (transform->output_format.u.video.height < 0) + output_format.u.video.height *= -1; output_format.u.video.padding.left = align.padding_left; - output_format.u.video.width += output_format.u.video.padding.left; output_format.u.video.padding.right = align.padding_right; - output_format.u.video.width += output_format.u.video.padding.right; output_format.u.video.padding.top = align.padding_top; - output_format.u.video.height += output_format.u.video.padding.top; output_format.u.video.padding.bottom = align.padding_bottom; - output_format.u.video.height += output_format.u.video.padding.bottom; GST_INFO("new video padding rect %s", wine_dbgstr_rect(&output_format.u.video.padding)); - - if (transform->output_format.u.video.height < 0) - output_format.u.video.height *= -1; }
if (format)
Sorry for the late review.
A lot of these patches need more description, ideally in the patch subject itself.
What's the point of 1/5? How does it help to calculate that info sooner?
Why 2/5? Isn't the decoder supposed to update the output format manually? Does 2/5 solve a specific problem, or is it just to make the code more conceptually correct?
Why 4/5? How does it help to flip that way?
5/5 seems to be a refactor (changing the semantics of wg_format width/height to no longer include padding) at the same time as a functional change. From what I can read of the functional change, I also find it very confusing. Why can't we just put a meta on the input buffer? In what case will the input and output size be inconsistent, once we've already discarded padding?
1/5 also feels awkward, mostly because of those _ptr variables, although that awkwardness could be solved by just not making [all of] read_transform_output_data() a separate function.
(Also, those GstVideoInfo pointers should be const. I was trying to figure out why they were output...)