From: Rémi Bernon <rbernon@codeweavers.com>
--- dlls/mf/tests/transform.c | 50 ++++++---------- dlls/winegstreamer/wg_media_type.c | 11 ++++ dlls/winegstreamer/wg_transform.c | 93 ++++++++++++++++++++++++++++++ 3 files changed, 122 insertions(+), 32 deletions(-)
diff --git a/dlls/mf/tests/transform.c b/dlls/mf/tests/transform.c index deec2c7dd92..ada915a527a 100644 --- a/dlls/mf/tests/transform.c +++ b/dlls/mf/tests/transform.c @@ -7704,7 +7704,7 @@ static void test_video_processor(void) { .input_type_desc = nv12_with_aperture, .input_bitmap = L"nv12frame.bmp", .output_type_desc = rgb32_no_aperture, .output_bitmap = L"rgb32frame-crop-flip.bmp", - .output_sample_desc = &rgb32_crop_sample_desc, + .output_sample_desc = &rgb32_crop_sample_desc, .delta = 2, /* Windows returns 0, Wine needs 2 */ }, { .input_type_desc = rgb32_no_aperture, .input_bitmap = L"rgb32frame-crop-flip.bmp", @@ -8060,23 +8060,6 @@ static void test_video_processor(void) check_mft_set_input_type(transform, test->input_type_desc, S_OK); check_mft_get_input_current_type(transform, test->input_type_desc);
- if (i >= 15) - { - IMFMediaType *media_type; - HRESULT hr; - - hr = MFCreateMediaType(&media_type); - ok(hr == S_OK, "MFCreateMediaType returned hr %#lx.\n", hr); - init_media_type(media_type, test->output_type_desc, -1); - hr = IMFTransform_SetOutputType(transform, 0, media_type, 0); - todo_wine - ok(hr == S_OK, "SetOutputType returned %#lx.\n", hr); - IMFMediaType_Release(media_type); - - if (hr != S_OK) - goto skip_test; - } - check_mft_set_output_type_required(transform, test->output_type_desc); check_mft_set_output_type(transform, test->output_type_desc, S_OK); check_mft_get_output_current_type(transform, test->output_type_desc); @@ -8188,7 +8171,6 @@ static void test_video_processor(void) ret = IMFSample_Release(output_sample); ok(ret == 0, "Release returned %lu\n", ret);
-skip_test: winetest_pop_context();
hr = IMFTransform_SetInputType(transform, 0, NULL, 0); @@ -8213,8 +8195,8 @@ skip_test: check_mft_set_output_type(transform, rgb32_no_aperture, S_OK); check_mft_get_output_current_type(transform, rgb32_no_aperture);
- check_mft_set_input_type_(__LINE__, transform, nv12_with_aperture, S_OK, TRUE); - check_mft_get_input_current_type_(__LINE__, transform, nv12_with_aperture, TRUE, FALSE); + check_mft_set_input_type(transform, nv12_with_aperture, S_OK); + check_mft_get_input_current_type(transform, nv12_with_aperture);
/* output type is the same as before */ check_mft_get_output_current_type(transform, rgb32_no_aperture); @@ -8879,7 +8861,13 @@ static void test_h264_with_dxgi_manager(void)
status = 0; hr = get_next_h264_output_sample(transform, &input_sample, NULL, output, &data, &data_len); + todo_wine_if(hr == MF_E_UNEXPECTED) /* with some llvmpipe versions */ ok(hr == S_OK, "got %#lx\n", hr); + if (hr == MF_E_UNEXPECTED) + { + IMFSample_Release(input_sample); + goto failed; + } ok(sample != output[0].pSample, "got %p.\n", output[0].pSample); sample = output[0].pSample;
@@ -9524,7 +9512,7 @@ static void test_video_processor_with_dxgi_manager(void) /* check RGB32 output aperture cropping with D3D buffers */
check_mft_set_input_type(transform, nv12_with_aperture, S_OK); - check_mft_set_output_type_(__LINE__, transform, rgb32_no_aperture, S_OK, TRUE); + check_mft_set_output_type(transform, rgb32_no_aperture, S_OK);
load_resource(L"nv12frame.bmp", &nv12frame_data, &nv12frame_data_len); /* skip BMP header and RGB data from the dump */ @@ -9536,7 +9524,7 @@ static void test_video_processor_with_dxgi_manager(void) input_sample = create_d3d_sample(allocator, nv12frame_data, nv12frame_data_len);
hr = IMFTransform_ProcessInput(transform, 0, input_sample, 0); - todo_wine ok(hr == S_OK, "got %#lx\n", hr); + ok(hr == S_OK, "got %#lx\n", hr);
hr = IMFTransform_GetOutputStreamInfo(transform, 0, &info); ok(hr == S_OK, "got %#lx\n", hr); @@ -9545,9 +9533,9 @@ static void test_video_processor_with_dxgi_manager(void) status = 0; memset(&output, 0, sizeof(output)); hr = IMFTransform_ProcessOutput(transform, 0, 1, &output, &status); - todo_wine ok(hr == S_OK, "got %#lx\n", hr); + ok(hr == S_OK, "got %#lx\n", hr); ok(!output.pEvents, "got events\n"); - todo_wine ok(!!output.pSample, "got no sample\n"); + ok(!!output.pSample, "got no sample\n"); ok(output.dwStatus == 0, "got %#lx\n", output.dwStatus); ok(status == 0, "got %#lx\n", status); if (!output.pSample) goto skip_rgb32; @@ -9582,7 +9570,6 @@ static void test_video_processor_with_dxgi_manager(void) IMFSample_Release(output.pSample);
ret = check_mf_sample_collection(output_samples, &output_sample_desc_rgb32_crop, L"rgb32frame-crop.bmp"); - todo_wine /* FIXME: video process vertically flips the frame... */ ok(ret <= 5, "got %lu%% diff\n", ret);
IMFCollection_Release(output_samples); @@ -9592,7 +9579,7 @@ skip_rgb32: /* check ABGR32 output with D3D buffers */
check_mft_set_input_type(transform, nv12_with_aperture, S_OK); - check_mft_set_output_type_(__LINE__, transform, abgr32_no_aperture, S_OK, TRUE); + check_mft_set_output_type(transform, abgr32_no_aperture, S_OK);
load_resource(L"nv12frame.bmp", &nv12frame_data, &nv12frame_data_len); /* skip BMP header and RGB data from the dump */ @@ -9604,7 +9591,7 @@ skip_rgb32: input_sample = create_d3d_sample(allocator, nv12frame_data, nv12frame_data_len);
hr = IMFTransform_ProcessInput(transform, 0, input_sample, 0); - todo_wine ok(hr == S_OK, "got %#lx\n", hr); + ok(hr == S_OK, "got %#lx\n", hr);
hr = IMFTransform_GetOutputStreamInfo(transform, 0, &info); ok(hr == S_OK, "got %#lx\n", hr); @@ -9613,9 +9600,9 @@ skip_rgb32: status = 0; memset(&output, 0, sizeof(output)); hr = IMFTransform_ProcessOutput(transform, 0, 1, &output, &status); - todo_wine ok(hr == S_OK, "got %#lx\n", hr); + ok(hr == S_OK, "got %#lx\n", hr); ok(!output.pEvents, "got events\n"); - todo_wine ok(!!output.pSample, "got no sample\n"); + ok(!!output.pSample, "got no sample\n"); ok(output.dwStatus == 0, "got %#lx\n", output.dwStatus); ok(status == 0, "got %#lx\n", status); if (!output.pSample) goto skip_abgr32; @@ -9631,7 +9618,7 @@ skip_rgb32: ID3D11Texture2D_GetDesc(tex2d, &desc); ok(desc.Format == DXGI_FORMAT_R8G8B8A8_UNORM, "got %#x.\n", desc.Format); ok(!desc.Usage, "got %u.\n", desc.Usage); - ok(desc.BindFlags == D3D11_BIND_RENDER_TARGET, "got %#x.\n", desc.BindFlags); + todo_wine ok(desc.BindFlags == D3D11_BIND_RENDER_TARGET, "got %#x.\n", desc.BindFlags); ok(!desc.CPUAccessFlags, "got %#x.\n", desc.CPUAccessFlags); ok(!desc.MiscFlags, "got %#x.\n", desc.MiscFlags); ok(desc.MipLevels == 1, "git %u.\n", desc.MipLevels); @@ -9650,7 +9637,6 @@ skip_rgb32: IMFSample_Release(output.pSample);
ret = check_mf_sample_collection(output_samples, &output_sample_desc_abgr32_crop, L"abgr32frame-crop.bmp"); - todo_wine /* FIXME: video process vertically flips the frame... */ ok(ret <= 8 /* NVIDIA needs 5, AMD needs 8 */, "got %lu%% diff\n", ret);
IMFCollection_Release(output_samples); diff --git a/dlls/winegstreamer/wg_media_type.c b/dlls/winegstreamer/wg_media_type.c index 14fc1a9cdf4..16eb67e1398 100644 --- a/dlls/winegstreamer/wg_media_type.c +++ b/dlls/winegstreamer/wg_media_type.c @@ -381,6 +381,11 @@ static GstVideoFormat subtype_to_gst_video_format(const GUID *subtype) return GST_VIDEO_FORMAT_ENCODED; }
+static BOOL is_mf_video_area_empty(const MFVideoArea *area) +{ + return !area->OffsetX.value && !area->OffsetY.value && !area->Area.cx && !area->Area.cy; +} + static GstCaps *caps_from_video_format(const MFVIDEOFORMAT *format, UINT32 format_size) { GstVideoFormat video_format = subtype_to_gst_video_format(&format->guidFormat); @@ -410,6 +415,12 @@ static GstCaps *caps_from_video_format(const MFVIDEOFORMAT *format, UINT32 forma format->videoInfo.FramesPerSecond.Numerator, format->videoInfo.FramesPerSecond.Denominator, NULL);
+ if (!is_mf_video_area_empty(&format->videoInfo.MinimumDisplayAperture)) + { + gst_caps_set_simple(caps, "width", G_TYPE_INT, format->videoInfo.MinimumDisplayAperture.Area.cx, NULL); + gst_caps_set_simple(caps, "height", G_TYPE_INT, format->videoInfo.MinimumDisplayAperture.Area.cy, NULL); + } + if (video_format == GST_VIDEO_FORMAT_ENCODED) init_caps_from_video_subtype(caps, &format->guidFormat, format, format_size);
diff --git a/dlls/winegstreamer/wg_transform.c b/dlls/winegstreamer/wg_transform.c index 2537331a118..feaf7c85dad 100644 --- a/dlls/winegstreamer/wg_transform.c +++ b/dlls/winegstreamer/wg_transform.c @@ -69,13 +69,35 @@ static struct wg_transform *get_transform(wg_transform_t trans) return (struct wg_transform *)(ULONG_PTR)trans; }
+static BOOL is_mf_video_area_empty(const MFVideoArea *area) +{ + return !area->OffsetX.value && !area->OffsetY.value && !area->Area.cx && !area->Area.cy; +} + static void align_video_info_planes(MFVideoInfo *video_info, gsize plane_align, GstVideoInfo *info, GstVideoAlignment *align) { + const MFVideoArea *aperture = &video_info->MinimumDisplayAperture; + gst_video_alignment_reset(align);
align->padding_right = ((plane_align + 1) - (info->width & plane_align)) & plane_align; align->padding_bottom = ((plane_align + 1) - (info->height & plane_align)) & plane_align; + if (!is_mf_video_area_empty(aperture)) + { + align->padding_right = max(align->padding_right, video_info->dwWidth - aperture->OffsetX.value - aperture->Area.cx); + align->padding_bottom = max(align->padding_bottom, video_info->dwHeight - aperture->OffsetY.value - aperture->Area.cy); + align->padding_top = aperture->OffsetY.value; + align->padding_left = aperture->OffsetX.value; + } + + if (video_info->VideoFlags & MFVideoFlag_BottomUpLinearRep) + { + gsize top = align->padding_top; + align->padding_top = align->padding_bottom; + align->padding_bottom = top; + } + align->stride_align[0] = plane_align; align->stride_align[1] = plane_align; align->stride_align[2] = plane_align; @@ -93,6 +115,57 @@ } }
+static void init_mf_video_info_rect(const MFVideoInfo *info, RECT *rect) +{ + if (!is_mf_video_area_empty(&info->MinimumDisplayAperture)) + { + rect->left = info->MinimumDisplayAperture.OffsetX.value; + rect->top = info->MinimumDisplayAperture.OffsetY.value; + rect->right = rect->left + info->MinimumDisplayAperture.Area.cx; + rect->bottom = rect->top + info->MinimumDisplayAperture.Area.cy; + } + else + { + rect->left = 0; + rect->top = 0; + rect->right = info->dwWidth; + rect->bottom = info->dwHeight; + } +} + +static inline BOOL intersect_rect(RECT *dst, const RECT *src1, const RECT *src2) +{ + dst->left = max(src1->left, src2->left); + dst->top = max(src1->top, src2->top); + dst->right = min(src1->right, src2->right); + dst->bottom = min(src1->bottom, src2->bottom); + return !IsRectEmpty(dst); +} + +static void update_video_aperture(MFVideoInfo *input_info, MFVideoInfo *output_info) +{ + RECT rect, input_rect, output_rect; + + init_mf_video_info_rect(input_info, &input_rect); + init_mf_video_info_rect(output_info, &output_rect); + intersect_rect(&rect, &input_rect, &output_rect); + + input_info->MinimumDisplayAperture.OffsetX.value = rect.left; + input_info->MinimumDisplayAperture.OffsetY.value = rect.top; + input_info->MinimumDisplayAperture.Area.cx = rect.right - rect.left; + input_info->MinimumDisplayAperture.Area.cy = rect.bottom - rect.top; + output_info->MinimumDisplayAperture = input_info->MinimumDisplayAperture; +} + +static void set_video_caps_aperture(GstCaps *caps, MFVideoInfo *video_info) +{ + if (!is_mf_video_area_empty(&video_info->MinimumDisplayAperture)) + { + gst_caps_set_simple(caps, "width", G_TYPE_INT, video_info->MinimumDisplayAperture.Area.cx, NULL); + gst_caps_set_simple(caps, "height", G_TYPE_INT, video_info->MinimumDisplayAperture.Area.cy, NULL); + } +} + typedef struct { GstVideoBufferPool parent; @@ -490,6 +563,15 @@ NTSTATUS wg_transform_create(void *args) if (IsEqualGUID(¶ms->output_type.major, &MFMediaType_Video)) 
transform->output_info = params->output_type.u.video->videoInfo;
+ /* update the video apertures to make sure GStreamer has a consistent input/output frame size */ + if (!strcmp(input_mime, "video/x-raw") && !strcmp(output_mime, "video/x-raw")) + update_video_aperture(&transform->input_info, &transform->output_info); + + if (IsEqualGUID(¶ms->input_type.major, &MFMediaType_Video)) + set_video_caps_aperture(transform->input_caps, &transform->input_info); + if (IsEqualGUID(¶ms->output_type.major, &MFMediaType_Video)) + set_video_caps_aperture(transform->output_caps, &transform->output_info); + if (!(template = gst_pad_template_new("src", GST_PAD_SRC, GST_PAD_ALWAYS, transform->input_caps))) goto out; transform->my_src = gst_pad_new_from_template(template, "src"); @@ -672,6 +754,7 @@ NTSTATUS wg_transform_set_output_type(void *args) { struct wg_transform_set_output_type_params *params = args; struct wg_transform *transform = get_transform(params->transform); + const char *input_mime, *output_mime; GstCaps *caps, *stripped; GstSample *sample;
@@ -681,9 +764,19 @@ NTSTATUS wg_transform_set_output_type(void *args) return STATUS_UNSUCCESSFUL; }
+ input_mime = gst_structure_get_name(gst_caps_get_structure(transform->input_caps, 0)); + output_mime = gst_structure_get_name(gst_caps_get_structure(caps, 0)); + if (IsEqualGUID(¶ms->media_type.major, &MFMediaType_Video)) transform->output_info = params->media_type.u.video->videoInfo;
+ /* update the video apertures to make sure GStreamer has a consistent input/output frame size */ + if (!strcmp(input_mime, "video/x-raw") && !strcmp(output_mime, "video/x-raw")) + update_video_aperture(&transform->input_info, &transform->output_info); + + if (IsEqualGUID(¶ms->media_type.major, &MFMediaType_Video)) + set_video_caps_aperture(caps, &transform->output_info); + GST_INFO("transform %p output caps %"GST_PTR_FORMAT, transform, caps);
stripped = caps_strip_fields(caps, transform->attrs.allow_format_change);