The app I'm considering opens a video_processor on its own, with an NV12 format as input and an ARGB32 format as output.
Tested on Windows: the samples are flipped vertically, while Wine leaves them untouched.
So this adds a videoflip element to the video processor, activated when needed. Activation is currently decided from the input/output formats (RGB vs non-RGB).
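To illustrate the intent (a sketch only, not the patch code; the helper names here are hypothetical): a vertical flip is only wanted when exactly one side of the conversion is bottom-up, i.e. carries a negative MF_MT_DEFAULT_STRIDE, assuming the video processor also treats an RGB output with no explicit stride as bottom-up while YUV types stay top-down:

/* Sketch only, hypothetical helpers: decide whether a vertical flip is needed. */
#include <stdbool.h>

struct video_type
{
    bool is_rgb;  /* e.g. MFVideoFormat_RGB32 or ARGB32 */
    long stride;  /* MF_MT_DEFAULT_STRIDE, 0 when the attribute is absent */
};

static bool is_bottom_up(const struct video_type *type)
{
    if (type->stride) return type->stride < 0;
    return type->is_rgb; /* assumption: RGB without an explicit stride behaves as bottom-up */
}

static bool needs_vertical_flip(const struct video_type *in, const struct video_type *out)
{
    /* flip only when exactly one side is bottom-up */
    return is_bottom_up(in) != is_bottom_up(out);
}

int main(void)
{
    struct video_type nv12_in = { .is_rgb = false, .stride = 0 };
    struct video_type argb_out = { .is_rgb = true, .stride = 0 };
    return needs_vertical_flip(&nv12_in, &argb_out) ? 0 : 1; /* NV12 -> ARGB32 needs a flip */
}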
Set as draft since it is somehow related to MR!2159. Comments welcome.
Signed-off-by: Eric Pouech epouech@codeweavers.com
-- v8:
winegstreamer: In video_processor, activate a videoflip converter.
mf/tests: Add tests about (negative) stride handling.
From: Eric Pouech epouech@codeweavers.com
Signed-off-by: Eric Pouech epouech@codeweavers.com
---
 dlls/mf/tests/transform.c | 579 ++++++++++++++++++++++++++++----------
 1 file changed, 434 insertions(+), 145 deletions(-)
diff --git a/dlls/mf/tests/transform.c b/dlls/mf/tests/transform.c index b41f87afabe..fb1b0f6c9c6 100644 --- a/dlls/mf/tests/transform.c +++ b/dlls/mf/tests/transform.c @@ -4971,6 +4971,37 @@ static void test_wmv_decoder(void) ATTR_RATIO(MF_MT_FRAME_SIZE, actual_width, actual_height, .required = TRUE), {0}, }; + const struct attribute_desc output_type_desc_negative_stride[] = + { + ATTR_GUID(MF_MT_MAJOR_TYPE, MFMediaType_Video, .required = TRUE), + ATTR_GUID(MF_MT_SUBTYPE, MFVideoFormat_NV12, .required = TRUE), + ATTR_RATIO(MF_MT_FRAME_SIZE, actual_width, actual_height, .required = TRUE), + ATTR_UINT32(MF_MT_DEFAULT_STRIDE, -actual_width), + {0}, + }; + const struct attribute_desc output_type_desc_rgb[] = + { + ATTR_GUID(MF_MT_MAJOR_TYPE, MFMediaType_Video, .required = TRUE), + ATTR_GUID(MF_MT_SUBTYPE, MFVideoFormat_RGB32, .required = TRUE), + ATTR_RATIO(MF_MT_FRAME_SIZE, actual_width, actual_height, .required = TRUE), + {0}, + }; + const struct attribute_desc output_type_desc_rgb_negative_stride[] = + { + ATTR_GUID(MF_MT_MAJOR_TYPE, MFMediaType_Video, .required = TRUE), + ATTR_GUID(MF_MT_SUBTYPE, MFVideoFormat_RGB32, .required = TRUE), + ATTR_RATIO(MF_MT_FRAME_SIZE, actual_width, actual_height, .required = TRUE), + ATTR_UINT32(MF_MT_DEFAULT_STRIDE, -actual_width * 4), + {0}, + }; + const struct attribute_desc output_type_desc_rgb_positive_stride[] = + { + ATTR_GUID(MF_MT_MAJOR_TYPE, MFMediaType_Video, .required = TRUE), + ATTR_GUID(MF_MT_SUBTYPE, MFVideoFormat_RGB32, .required = TRUE), + ATTR_RATIO(MF_MT_FRAME_SIZE, actual_width, actual_height, .required = TRUE), + ATTR_UINT32(MF_MT_DEFAULT_STRIDE, actual_width * 4), + {0}, + }; const struct attribute_desc expect_input_type_desc[] = { ATTR_GUID(MF_MT_MAJOR_TYPE, MFMediaType_Video), @@ -4992,12 +5023,44 @@ static void test_wmv_decoder(void) ATTR_UINT32(MF_MT_ALL_SAMPLES_INDEPENDENT, 1), {0}, }; + const struct attribute_desc expect_output_type_desc_rgb[] = + { + ATTR_GUID(MF_MT_MAJOR_TYPE, MFMediaType_Video), + ATTR_GUID(MF_MT_SUBTYPE, MFVideoFormat_RGB32), + ATTR_RATIO(MF_MT_FRAME_SIZE, actual_width, actual_height), + ATTR_UINT32(MF_MT_DEFAULT_STRIDE, actual_width * 4), + ATTR_UINT32(MF_MT_SAMPLE_SIZE, actual_width * actual_height * 4), + ATTR_UINT32(MF_MT_FIXED_SIZE_SAMPLES, 1), + ATTR_UINT32(MF_MT_VIDEO_NOMINAL_RANGE, 2), + ATTR_RATIO(MF_MT_PIXEL_ASPECT_RATIO, 1, 1), + ATTR_UINT32(MF_MT_ALL_SAMPLES_INDEPENDENT, 1), + {0}, + }; + const struct attribute_desc expect_output_type_desc_rgb_negative_stride[] = + { + ATTR_GUID(MF_MT_MAJOR_TYPE, MFMediaType_Video), + ATTR_GUID(MF_MT_SUBTYPE, MFVideoFormat_RGB32), + ATTR_RATIO(MF_MT_FRAME_SIZE, actual_width, actual_height), + ATTR_UINT32(MF_MT_DEFAULT_STRIDE, -actual_width * 4), + ATTR_UINT32(MF_MT_SAMPLE_SIZE, actual_width * actual_height * 4), + ATTR_UINT32(MF_MT_FIXED_SIZE_SAMPLES, 1), + ATTR_UINT32(MF_MT_VIDEO_NOMINAL_RANGE, 2), + ATTR_RATIO(MF_MT_PIXEL_ASPECT_RATIO, 1, 1), + ATTR_UINT32(MF_MT_ALL_SAMPLES_INDEPENDENT, 1), + {0}, + }; const MFT_OUTPUT_STREAM_INFO expect_output_info = { .dwFlags = MFT_OUTPUT_STREAM_WHOLE_SAMPLES | MFT_OUTPUT_STREAM_SINGLE_SAMPLE_PER_BUFFER | MFT_OUTPUT_STREAM_DISCARDABLE, .cbSize = 0x3600, .cbAlignment = 1, }; + const MFT_OUTPUT_STREAM_INFO expect_output_info_rgb = + { + .dwFlags = MFT_OUTPUT_STREAM_WHOLE_SAMPLES | MFT_OUTPUT_STREAM_SINGLE_SAMPLE_PER_BUFFER | MFT_OUTPUT_STREAM_DISCARDABLE, + .cbSize = 0x9000, + .cbAlignment = 1, + }; const MFT_OUTPUT_STREAM_INFO empty_output_info = { .dwFlags = MFT_OUTPUT_STREAM_WHOLE_SAMPLES | 
MFT_OUTPUT_STREAM_SINGLE_SAMPLE_PER_BUFFER | MFT_OUTPUT_STREAM_DISCARDABLE, @@ -5007,6 +5070,11 @@ static void test_wmv_decoder(void) .cbSize = 0x3600, .cbAlignment = 1, }; + const MFT_INPUT_STREAM_INFO expect_input_info_rgb = + { + .cbSize = 0x9000, + .cbAlignment = 1, + };
const struct attribute_desc output_sample_attributes[] = { @@ -5018,12 +5086,92 @@ static void test_wmv_decoder(void) .length = actual_width * actual_height * 3 / 2, .compare = compare_nv12, .dump = dump_nv12, .rect = {.right = 82, .bottom = 84}, }; + const struct buffer_desc output_buffer_desc_rgb = + { + .length = actual_width * actual_height * 4, + .compare = compare_rgb32, .dump = dump_rgb32, .rect = {.right = 82, .bottom = 84}, + }; const struct sample_desc output_sample_desc_nv12 = { .attributes = output_sample_attributes, .sample_time = 0, .sample_duration = 333333, .buffer_count = 1, .buffers = &output_buffer_desc_nv12, }; + const struct sample_desc output_sample_desc_rgb = + { + .attributes = output_sample_attributes, + .sample_time = 0, .sample_duration = 333333, + .buffer_count = 1, .buffers = &output_buffer_desc_rgb, + }; + + const struct transform_desc + { + const struct attribute_desc *output_type_desc; + const struct attribute_desc *expect_output_type_desc; + const MFT_INPUT_STREAM_INFO *expect_input_info; + const MFT_OUTPUT_STREAM_INFO *expect_output_info; + const struct sample_desc *output_sample_desc; + const WCHAR *result_bitmap; + ULONG delta; + } + transform_tests[] = + { + { + /* WMV1 -> YUV */ + .output_type_desc = output_type_desc, + .expect_output_type_desc = expect_output_type_desc, + .expect_input_info = &expect_input_info, + .expect_output_info = &expect_output_info, + .output_sample_desc = &output_sample_desc_nv12, + .result_bitmap = L"nv12frame.bmp", + .delta = 0, + }, + + { + /* WMV1 -> YUV (negative stride) */ + .output_type_desc = output_type_desc_negative_stride, + .expect_output_type_desc = expect_output_type_desc, + .expect_input_info = &expect_input_info, + .expect_output_info = &expect_output_info, + .output_sample_desc = &output_sample_desc_nv12, + .result_bitmap = L"nv12frame.bmp", + .delta = 0, + }, + + { + /* WMV1 -> RGB */ + .output_type_desc = output_type_desc_rgb, + .expect_output_type_desc = expect_output_type_desc_rgb, + .expect_input_info = &expect_input_info_rgb, + .expect_output_info = &expect_output_info_rgb, + .output_sample_desc = &output_sample_desc_rgb, + .result_bitmap = L"rgb32frame-vp.bmp", + .delta = 5, + }, + + { + /* WMV1 -> RGB (negative stride) */ + .output_type_desc = output_type_desc_rgb_negative_stride, + .expect_output_type_desc = expect_output_type_desc_rgb_negative_stride, + .expect_input_info = &expect_input_info_rgb, + .expect_output_info = &expect_output_info_rgb, + .output_sample_desc = &output_sample_desc_rgb, + .result_bitmap = L"rgb32frame-vp.bmp", + .delta = 5, + }, + + { + /* WMV1 -> RGB (positive stride */ + .output_type_desc = output_type_desc_rgb_positive_stride, + .expect_output_type_desc = expect_output_type_desc_rgb, + .expect_input_info = &expect_input_info_rgb, + .expect_output_info = &expect_output_info_rgb, + .output_sample_desc = &output_sample_desc_rgb, + .result_bitmap = L"rgb32frame-vp.bmp", + .delta = 5, + }, + + };
MFT_REGISTER_TYPE_INFO output_type = {MFMediaType_Video, MFVideoFormat_NV12}; MFT_REGISTER_TYPE_INFO input_type = {MFMediaType_Video, MFVideoFormat_WMV1}; @@ -5034,7 +5182,7 @@ static void test_wmv_decoder(void) const BYTE *wmvenc_data; ULONG wmvenc_data_len; DWORD output_status; - ULONG i, ret, ref; + ULONG i, j, ret, ref; HRESULT hr;
hr = CoInitialize(NULL); @@ -5114,50 +5262,57 @@ static void test_wmv_decoder(void) ok(hr == MF_E_NO_MORE_TYPES, "GetOutputAvailableType returned %#lx\n", hr); ok(i == ARRAY_SIZE(expect_available_outputs), "%lu input media types\n", i);
- check_mft_set_output_type_required(transform, output_type_desc); - check_mft_set_output_type(transform, output_type_desc, S_OK); - check_mft_get_output_current_type_(transform, expect_output_type_desc, FALSE, TRUE); + for (j = 0; j < ARRAY_SIZE(transform_tests); j++) + { + winetest_push_context("transform #%lu", j);
- check_mft_get_input_stream_info(transform, S_OK, &expect_input_info); - check_mft_get_output_stream_info(transform, S_OK, &expect_output_info); + check_mft_set_output_type_required(transform, transform_tests[j].output_type_desc); + check_mft_set_output_type(transform, transform_tests[j].output_type_desc, S_OK); + check_mft_get_output_current_type_(transform, transform_tests[j].expect_output_type_desc, FALSE, TRUE);
- load_resource(L"wmvencdata.bin", &wmvenc_data, &wmvenc_data_len); + check_mft_get_input_stream_info(transform, S_OK, transform_tests[j].expect_input_info); + check_mft_get_output_stream_info(transform, S_OK, transform_tests[j].expect_output_info);
- input_sample = create_sample(wmvenc_data + sizeof(DWORD), *(DWORD *)wmvenc_data); - wmvenc_data_len -= *(DWORD *)wmvenc_data + sizeof(DWORD); - wmvenc_data += *(DWORD *)wmvenc_data + sizeof(DWORD); - hr = IMFSample_SetSampleTime(input_sample, 0); - ok(hr == S_OK, "SetSampleTime returned %#lx\n", hr); - hr = IMFSample_SetSampleDuration(input_sample, 333333); - ok(hr == S_OK, "SetSampleDuration returned %#lx\n", hr); - hr = IMFTransform_ProcessInput(transform, 0, input_sample, 0); - ok(hr == S_OK, "ProcessInput returned %#lx\n", hr); - ret = IMFSample_Release(input_sample); - ok(ret <= 1, "Release returned %ld\n", ret); + load_resource(L"wmvencdata.bin", &wmvenc_data, &wmvenc_data_len);
- hr = MFCreateCollection(&output_samples); - ok(hr == S_OK, "MFCreateCollection returned %#lx\n", hr); + input_sample = create_sample(wmvenc_data + sizeof(DWORD), *(DWORD *)wmvenc_data); + wmvenc_data_len -= *(DWORD *)wmvenc_data + sizeof(DWORD); + wmvenc_data += *(DWORD *)wmvenc_data + sizeof(DWORD); + hr = IMFSample_SetSampleTime(input_sample, 0); + ok(hr == S_OK, "SetSampleTime returned %#lx\n", hr); + hr = IMFSample_SetSampleDuration(input_sample, 333333); + ok(hr == S_OK, "SetSampleDuration returned %#lx\n", hr); + hr = IMFTransform_ProcessInput(transform, 0, input_sample, 0); + ok(hr == S_OK, "ProcessInput returned %#lx\n", hr); + ret = IMFSample_Release(input_sample); + ok(ret <= 1, "Release returned %ld\n", ret);
- output_sample = create_sample(NULL, expect_output_info.cbSize); - for (i = 0; SUCCEEDED(hr = check_mft_process_output(transform, output_sample, &output_status)); i++) - { - winetest_push_context("%lu", i); - ok(hr == S_OK, "ProcessOutput returned %#lx\n", hr); - hr = IMFCollection_AddElement(output_samples, (IUnknown *)output_sample); - ok(hr == S_OK, "AddElement returned %#lx\n", hr); - ref = IMFSample_Release(output_sample); - ok(ref == 1, "Release returned %ld\n", ref); - output_sample = create_sample(NULL, expect_output_info.cbSize); + hr = MFCreateCollection(&output_samples); + ok(hr == S_OK, "MFCreateCollection returned %#lx\n", hr); + + output_sample = create_sample(NULL, transform_tests[j].expect_output_info->cbSize); + for (i = 0; SUCCEEDED(hr = check_mft_process_output(transform, output_sample, &output_status)); i++) + { + winetest_push_context("%lu", i); + ok(hr == S_OK, "ProcessOutput returned %#lx\n", hr); + hr = IMFCollection_AddElement(output_samples, (IUnknown *)output_sample); + ok(hr == S_OK, "AddElement returned %#lx\n", hr); + ref = IMFSample_Release(output_sample); + ok(ref == 1, "Release returned %ld\n", ref); + output_sample = create_sample(NULL, transform_tests[j].expect_output_info->cbSize); + winetest_pop_context(); + } + ok(hr == MF_E_TRANSFORM_NEED_MORE_INPUT, "ProcessOutput returned %#lx\n", hr); + ret = IMFSample_Release(output_sample); + ok(ret == 0, "Release returned %lu\n", ret); + ok(i == 1, "got %lu output samples\n", i); + + ret = check_mf_sample_collection(output_samples, transform_tests[j].output_sample_desc, + transform_tests[j].result_bitmap); + ok(ret <= transform_tests[j].delta, "got %lu%% diff\n", ret); + IMFCollection_Release(output_samples); winetest_pop_context(); } - ok(hr == MF_E_TRANSFORM_NEED_MORE_INPUT, "ProcessOutput returned %#lx\n", hr); - ret = IMFSample_Release(output_sample); - ok(ret == 0, "Release returned %lu\n", ret); - ok(i == 1, "got %lu output samples\n", i); - - ret = check_mf_sample_collection(output_samples, &output_sample_desc_nv12, L"nv12frame.bmp"); - ok(ret == 0, "got %lu%% diff\n", ret); - IMFCollection_Release(output_samples);
skip_tests: ret = IMFTransform_Release(transform); @@ -5591,6 +5746,22 @@ static void test_color_convert(void) ATTR_RATIO(MF_MT_FRAME_SIZE, actual_width, actual_height, .required = TRUE), {0}, }; + const struct attribute_desc output_type_desc_negative_stride[] = + { + ATTR_GUID(MF_MT_MAJOR_TYPE, MFMediaType_Video, .required = TRUE), + ATTR_GUID(MF_MT_SUBTYPE, MFVideoFormat_RGB32, .required = TRUE), + ATTR_RATIO(MF_MT_FRAME_SIZE, actual_width, actual_height, .required = TRUE), + ATTR_UINT32(MF_MT_DEFAULT_STRIDE, -actual_width * 4), + {0}, + }; + const struct attribute_desc output_type_desc_positive_stride[] = + { + ATTR_GUID(MF_MT_MAJOR_TYPE, MFMediaType_Video, .required = TRUE), + ATTR_GUID(MF_MT_SUBTYPE, MFVideoFormat_RGB32, .required = TRUE), + ATTR_RATIO(MF_MT_FRAME_SIZE, actual_width, actual_height, .required = TRUE), + ATTR_UINT32(MF_MT_DEFAULT_STRIDE, actual_width * 4), + {0}, + }; const struct attribute_desc expect_input_type_desc[] = { ATTR_GUID(MF_MT_MAJOR_TYPE, MFMediaType_Video), @@ -5616,6 +5787,18 @@ static void test_color_convert(void) ATTR_RATIO(MF_MT_PIXEL_ASPECT_RATIO, 1, 1), {0}, }; + const struct attribute_desc expect_output_type_desc_negative_stride[] = + { + ATTR_GUID(MF_MT_MAJOR_TYPE, MFMediaType_Video), + ATTR_GUID(MF_MT_SUBTYPE, MFVideoFormat_RGB32), + ATTR_RATIO(MF_MT_FRAME_SIZE, actual_width, actual_height), + ATTR_UINT32(MF_MT_DEFAULT_STRIDE, -actual_width * 4), + ATTR_UINT32(MF_MT_SAMPLE_SIZE, actual_width * actual_height * 4), + ATTR_UINT32(MF_MT_ALL_SAMPLES_INDEPENDENT, 1), + ATTR_UINT32(MF_MT_FIXED_SIZE_SAMPLES, 1), + ATTR_RATIO(MF_MT_PIXEL_ASPECT_RATIO, 1, 1), + {0}, + }; const MFT_OUTPUT_STREAM_INFO output_info = { .cbSize = actual_width * actual_height * 4, @@ -5643,6 +5826,41 @@ static void test_color_convert(void) .sample_time = 0, .sample_duration = 10000000, .buffer_count = 1, .buffers = &output_buffer_desc, }; + const struct transform_desc + { + const struct attribute_desc *output_type_desc; + const struct attribute_desc *expect_output_type_desc; + const WCHAR *result_bitmap; + ULONG delta; + } + color_conversion_tests[] = + { + + { + /* YUV -> RGB */ + .output_type_desc = output_type_desc, + .expect_output_type_desc = expect_output_type_desc, + .result_bitmap = L"rgb32frame.bmp", + .delta = 4, /* Windows return 0, Wine needs 4 */ + }, + + { + /* YUV -> RGB (negative stride) */ + .output_type_desc = output_type_desc_negative_stride, + .expect_output_type_desc = expect_output_type_desc_negative_stride, + .result_bitmap = L"rgb32frame-vp.bmp", + .delta = 6, + }, + + { + /* YUV -> RGB (positive stride) */ + .output_type_desc = output_type_desc_positive_stride, + .expect_output_type_desc = expect_output_type_desc, + .result_bitmap = L"rgb32frame.bmp", + .delta = 4, /* Windows return 0, Wine needs 4 */ + }, + + };
MFT_REGISTER_TYPE_INFO output_type = {MFMediaType_Video, MFVideoFormat_NV12}; MFT_REGISTER_TYPE_INFO input_type = {MFMediaType_Video, MFVideoFormat_I420}; @@ -5712,63 +5930,69 @@ static void test_color_convert(void) ok(hr == MF_E_NO_MORE_TYPES, "GetInputAvailableType returned %#lx\n", hr); ok(i == 20, "%lu input media types\n", i);
- check_mft_set_output_type_required(transform, output_type_desc); - check_mft_set_output_type(transform, output_type_desc, S_OK); - check_mft_get_output_current_type_(transform, expect_output_type_desc, FALSE, TRUE); - check_mft_set_input_type_required(transform, input_type_desc); check_mft_set_input_type(transform, input_type_desc); check_mft_get_input_current_type_(transform, expect_input_type_desc, FALSE, TRUE);
- check_mft_get_input_stream_info(transform, S_OK, &input_info); - check_mft_get_output_stream_info(transform, S_OK, &output_info); + for (i = 0; i < ARRAY_SIZE(color_conversion_tests); i++) + { + winetest_push_context("color conversion #%lu", i); + check_mft_set_output_type_required(transform, color_conversion_tests[i].output_type_desc); + check_mft_set_output_type(transform, color_conversion_tests[i].output_type_desc, S_OK); + check_mft_get_output_current_type_(transform, color_conversion_tests[i].expect_output_type_desc, FALSE, TRUE);
- load_resource(L"nv12frame.bmp", &nv12frame_data, &nv12frame_data_len); - /* skip BMP header and RGB data from the dump */ - length = *(DWORD *)(nv12frame_data + 2); - nv12frame_data_len = nv12frame_data_len - length; - nv12frame_data = nv12frame_data + length; - ok(nv12frame_data_len == 13824, "got length %lu\n", nv12frame_data_len); + check_mft_get_input_stream_info(transform, S_OK, &input_info); + check_mft_get_output_stream_info(transform, S_OK, &output_info);
- input_sample = create_sample(nv12frame_data, nv12frame_data_len); - hr = IMFSample_SetSampleTime(input_sample, 0); - ok(hr == S_OK, "SetSampleTime returned %#lx\n", hr); - hr = IMFSample_SetSampleDuration(input_sample, 10000000); - ok(hr == S_OK, "SetSampleDuration returned %#lx\n", hr); - hr = IMFTransform_ProcessInput(transform, 0, input_sample, 0); - ok(hr == S_OK, "ProcessInput returned %#lx\n", hr); - hr = IMFTransform_ProcessInput(transform, 0, input_sample, 0); - ok(hr == MF_E_NOTACCEPTING, "ProcessInput returned %#lx\n", hr); - hr = IMFTransform_ProcessMessage(transform, MFT_MESSAGE_COMMAND_DRAIN, 0); - ok(hr == S_OK, "ProcessMessage returned %#lx\n", hr); - ret = IMFSample_Release(input_sample); - ok(ret <= 1, "Release returned %ld\n", ret); + load_resource(L"nv12frame.bmp", &nv12frame_data, &nv12frame_data_len); + /* skip BMP header and RGB data from the dump */ + length = *(DWORD *)(nv12frame_data + 2); + nv12frame_data_len = nv12frame_data_len - length; + nv12frame_data = nv12frame_data + length; + ok(nv12frame_data_len == 13824, "got length %lu\n", nv12frame_data_len);
- hr = MFCreateCollection(&output_samples); - ok(hr == S_OK, "MFCreateCollection returned %#lx\n", hr); + input_sample = create_sample(nv12frame_data, nv12frame_data_len); + hr = IMFSample_SetSampleTime(input_sample, 0); + ok(hr == S_OK, "SetSampleTime returned %#lx\n", hr); + hr = IMFSample_SetSampleDuration(input_sample, 10000000); + ok(hr == S_OK, "SetSampleDuration returned %#lx\n", hr); + hr = IMFTransform_ProcessInput(transform, 0, input_sample, 0); + ok(hr == S_OK, "ProcessInput returned %#lx\n", hr); + hr = IMFTransform_ProcessInput(transform, 0, input_sample, 0); + ok(hr == MF_E_NOTACCEPTING, "ProcessInput returned %#lx\n", hr); + hr = IMFTransform_ProcessMessage(transform, MFT_MESSAGE_COMMAND_DRAIN, 0); + ok(hr == S_OK, "ProcessMessage returned %#lx\n", hr); + ret = IMFSample_Release(input_sample); + ok(ret <= 1, "Release returned %ld\n", ret);
- output_sample = create_sample(NULL, output_info.cbSize); - hr = check_mft_process_output(transform, output_sample, &output_status); - ok(hr == S_OK, "ProcessOutput returned %#lx\n", hr); - ok(output_status == 0, "got output[0].dwStatus %#lx\n", output_status); - hr = IMFCollection_AddElement(output_samples, (IUnknown *)output_sample); - ok(hr == S_OK, "AddElement returned %#lx\n", hr); - ref = IMFSample_Release(output_sample); - ok(ref == 1, "Release returned %ld\n", ref); + hr = MFCreateCollection(&output_samples); + ok(hr == S_OK, "MFCreateCollection returned %#lx\n", hr);
- ret = check_mf_sample_collection(output_samples, &output_sample_desc, L"rgb32frame.bmp"); - ok(ret <= 4 /* small and harmless diff in Wine vs Windows */, "got %lu%% diff\n", ret); - IMFCollection_Release(output_samples); + output_sample = create_sample(NULL, output_info.cbSize); + hr = check_mft_process_output(transform, output_sample, &output_status); + ok(hr == S_OK, "ProcessOutput returned %#lx\n", hr); + ok(output_status == 0, "got output[0].dwStatus %#lx\n", output_status); + hr = IMFCollection_AddElement(output_samples, (IUnknown *)output_sample); + ok(hr == S_OK, "AddElement returned %#lx\n", hr); + ref = IMFSample_Release(output_sample); + ok(ref == 1, "Release returned %ld\n", ref);
- output_sample = create_sample(NULL, output_info.cbSize); - hr = check_mft_process_output(transform, output_sample, &output_status); - ok(hr == MF_E_TRANSFORM_NEED_MORE_INPUT, "ProcessOutput returned %#lx\n", hr); - ok(output_status == 0, "got output[0].dwStatus %#lx\n", output_status); - hr = IMFSample_GetTotalLength(output_sample, &length); - ok(hr == S_OK, "GetTotalLength returned %#lx\n", hr); - ok(length == 0, "got length %lu\n", length); - ret = IMFSample_Release(output_sample); - ok(ret == 0, "Release returned %lu\n", ret); + ret = check_mf_sample_collection(output_samples, &output_sample_desc, color_conversion_tests[i].result_bitmap); + todo_wine_if(i == 1) + ok(ret <= color_conversion_tests[i].delta, "got %lu%% diff\n", ret); + IMFCollection_Release(output_samples); + + output_sample = create_sample(NULL, output_info.cbSize); + hr = check_mft_process_output(transform, output_sample, &output_status); + ok(hr == MF_E_TRANSFORM_NEED_MORE_INPUT, "ProcessOutput returned %#lx\n", hr); + ok(output_status == 0, "got output[0].dwStatus %#lx\n", output_status); + hr = IMFSample_GetTotalLength(output_sample, &length); + ok(hr == S_OK, "GetTotalLength returned %#lx\n", hr); + ok(length == 0, "got length %lu\n", length); + ret = IMFSample_Release(output_sample); + ok(ret == 0, "Release returned %lu\n", ret); + winetest_pop_context(); + }
ret = IMFTransform_Release(transform); ok(ret == 0, "Release returned %ld\n", ret); @@ -5944,6 +6168,24 @@ static void test_video_processor(void) ATTR_BLOB(MF_MT_MINIMUM_DISPLAY_APERTURE, &actual_aperture, 16), {0}, }; + const struct attribute_desc output_type_desc_negative_stride[] = + { + ATTR_GUID(MF_MT_MAJOR_TYPE, MFMediaType_Video, .required = TRUE), + ATTR_GUID(MF_MT_SUBTYPE, MFVideoFormat_RGB32, .required = TRUE), + ATTR_RATIO(MF_MT_FRAME_SIZE, actual_width, actual_height, .required = TRUE), + ATTR_BLOB(MF_MT_MINIMUM_DISPLAY_APERTURE, &actual_aperture, 16), + ATTR_UINT32(MF_MT_DEFAULT_STRIDE, -actual_width * 4), + {0}, + }; + const struct attribute_desc output_type_desc_positive_stride[] = + { + ATTR_GUID(MF_MT_MAJOR_TYPE, MFMediaType_Video, .required = TRUE), + ATTR_GUID(MF_MT_SUBTYPE, MFVideoFormat_RGB32, .required = TRUE), + ATTR_RATIO(MF_MT_FRAME_SIZE, actual_width, actual_height, .required = TRUE), + ATTR_BLOB(MF_MT_MINIMUM_DISPLAY_APERTURE, &actual_aperture, 16), + ATTR_UINT32(MF_MT_DEFAULT_STRIDE, actual_width * 4), + {0}, + }; const MFT_OUTPUT_STREAM_INFO initial_output_info = {0}; const MFT_INPUT_STREAM_INFO initial_input_info = {0}; MFT_OUTPUT_STREAM_INFO output_info = {0}; @@ -5966,6 +6208,42 @@ static void test_video_processor(void) .buffer_count = 1, .buffers = &output_buffer_desc, };
+ const struct transform_desc + { + const struct attribute_desc *output_type_desc; + const struct attribute_desc *expect_output_type_desc; + const WCHAR *result_bitmap; + ULONG delta; + } + video_processor_tests[] = + { + + { + /* YUV -> RGB */ + .output_type_desc = output_type_desc, + .expect_output_type_desc = output_type_desc, + .result_bitmap = L"rgb32frame-vp.bmp", + .delta = 0, + }, + + { + /* YUV -> RGB (negative stride) */ + .output_type_desc = output_type_desc_negative_stride, + .expect_output_type_desc = output_type_desc_negative_stride, + .result_bitmap = L"rgb32frame-vp.bmp", + .delta = 0, + }, + + { + /* YUV -> RGB (positive stride) */ + .output_type_desc = output_type_desc_positive_stride, + .expect_output_type_desc = output_type_desc_positive_stride, + .result_bitmap = L"rgb32frame.bmp", + .delta = 6, + }, + + }; + MFT_REGISTER_TYPE_INFO output_type = {MFMediaType_Video, MFVideoFormat_NV12}; MFT_REGISTER_TYPE_INFO input_type = {MFMediaType_Video, MFVideoFormat_I420}; DWORD i, j, k, flags, length, output_status; @@ -6285,74 +6563,85 @@ static void test_video_processor(void) ok(hr == MF_E_NO_MORE_TYPES, "GetInputAvailableType returned %#lx\n", hr); ok(i == 22 || i == 30 || broken(i == 26) /* w1064v1507 */, "%lu input media types\n", i);
- check_mft_set_input_type_required(transform, input_type_desc); - check_mft_set_input_type(transform, input_type_desc); - check_mft_get_input_current_type(transform, input_type_desc); - - check_mft_set_output_type_required(transform, output_type_desc); - check_mft_set_output_type(transform, output_type_desc, S_OK); - check_mft_get_output_current_type(transform, output_type_desc); - - input_info.cbSize = actual_width * actual_height * 3 / 2; - output_info.cbSize = actual_width * actual_height * 4; - check_mft_get_input_stream_info(transform, S_OK, &input_info); - check_mft_get_output_stream_info(transform, S_OK, &output_info); + for (i = 0; i < ARRAY_SIZE(video_processor_tests); i++) + { + winetest_push_context("transform #%lu", i);
- load_resource(L"nv12frame.bmp", &nv12frame_data, &nv12frame_data_len); - /* skip BMP header and RGB data from the dump */ - length = *(DWORD *)(nv12frame_data + 2); - nv12frame_data_len = nv12frame_data_len - length; - nv12frame_data = nv12frame_data + length; - ok(nv12frame_data_len == 13824, "got length %lu\n", nv12frame_data_len); + check_mft_set_input_type_required(transform, input_type_desc); + check_mft_set_input_type(transform, input_type_desc); + check_mft_get_input_current_type(transform, input_type_desc);
- input_sample = create_sample(nv12frame_data, nv12frame_data_len); - hr = IMFSample_SetSampleTime(input_sample, 0); - ok(hr == S_OK, "SetSampleTime returned %#lx\n", hr); - hr = IMFSample_SetSampleDuration(input_sample, 10000000); - ok(hr == S_OK, "SetSampleDuration returned %#lx\n", hr); - hr = IMFTransform_ProcessInput(transform, 0, input_sample, 0); - ok(hr == S_OK, "ProcessInput returned %#lx\n", hr); - hr = IMFTransform_ProcessInput(transform, 0, input_sample, 0); - ok(hr == MF_E_NOTACCEPTING, "ProcessInput returned %#lx\n", hr); - hr = IMFTransform_ProcessMessage(transform, MFT_MESSAGE_COMMAND_DRAIN, 0); - ok(hr == S_OK, "ProcessMessage returned %#lx\n", hr); - ret = IMFSample_Release(input_sample); - ok(ret <= 1, "Release returned %ld\n", ret); + check_mft_set_output_type_required(transform, video_processor_tests[i].output_type_desc); + check_mft_set_output_type(transform, video_processor_tests[i].output_type_desc, S_OK); + check_mft_get_output_current_type(transform, video_processor_tests[i].output_type_desc);
- hr = MFCreateCollection(&output_samples); - ok(hr == S_OK, "MFCreateCollection returned %#lx\n", hr); + input_info.cbSize = actual_width * actual_height * 3 / 2; + output_info.cbSize = actual_width * actual_height * 4; + check_mft_get_input_stream_info(transform, S_OK, &input_info); + check_mft_get_output_stream_info(transform, S_OK, &output_info);
- output_sample = create_sample(NULL, output_info.cbSize); - hr = check_mft_process_output(transform, output_sample, &output_status); - ok(hr == S_OK || broken(hr == MF_E_SHUTDOWN) /* w8 */, "ProcessOutput returned %#lx\n", hr); - if (hr != S_OK) - { - win_skip("ProcessOutput returned MF_E_SHUTDOWN, skipping tests.\n"); - goto skip_output; - } - ok(output_status == 0, "got output[0].dwStatus %#lx\n", output_status); + load_resource(L"nv12frame.bmp", &nv12frame_data, &nv12frame_data_len); + /* skip BMP header and RGB data from the dump */ + length = *(DWORD *)(nv12frame_data + 2); + nv12frame_data_len = nv12frame_data_len - length; + nv12frame_data = nv12frame_data + length; + ok(nv12frame_data_len == 13824, "got length %lu\n", nv12frame_data_len);
- hr = IMFCollection_AddElement(output_samples, (IUnknown *)output_sample); - ok(hr == S_OK, "AddElement returned %#lx\n", hr); - ref = IMFSample_Release(output_sample); - ok(ref == 1, "Release returned %ld\n", ref); + input_sample = create_sample(nv12frame_data, nv12frame_data_len); + hr = IMFSample_SetSampleTime(input_sample, 0); + ok(hr == S_OK, "SetSampleTime returned %#lx\n", hr); + hr = IMFSample_SetSampleDuration(input_sample, 10000000); + ok(hr == S_OK, "SetSampleDuration returned %#lx\n", hr); + hr = IMFTransform_ProcessInput(transform, 0, input_sample, 0); + ok(hr == S_OK, "ProcessInput returned %#lx\n", hr); + hr = IMFTransform_ProcessInput(transform, 0, input_sample, 0); + ok(hr == MF_E_NOTACCEPTING, "ProcessInput returned %#lx\n", hr); + hr = IMFTransform_ProcessMessage(transform, MFT_MESSAGE_COMMAND_DRAIN, 0); + ok(hr == S_OK, "ProcessMessage returned %#lx\n", hr); + ret = IMFSample_Release(input_sample); + ok(ret <= 1, "Release returned %ld\n", ret);
- ret = check_mf_sample_collection(output_samples, &output_sample_desc, L"rgb32frame-vp.bmp"); - todo_wine - ok(ret == 0 || broken(ret == 25) /* w1064v1507 / w1064v1809 incorrectly rescale */, "got %lu%% diff\n", ret); - IMFCollection_Release(output_samples); + hr = MFCreateCollection(&output_samples); + ok(hr == S_OK, "MFCreateCollection returned %#lx\n", hr);
- output_sample = create_sample(NULL, output_info.cbSize); - hr = check_mft_process_output(transform, output_sample, &output_status); - ok(hr == MF_E_TRANSFORM_NEED_MORE_INPUT, "ProcessOutput returned %#lx\n", hr); - ok(output_status == 0, "got output[0].dwStatus %#lx\n", output_status); - hr = IMFSample_GetTotalLength(output_sample, &length); - ok(hr == S_OK, "GetTotalLength returned %#lx\n", hr); - ok(length == 0, "got length %lu\n", length); + output_sample = create_sample(NULL, output_info.cbSize); + hr = check_mft_process_output(transform, output_sample, &output_status);
-skip_output: - ret = IMFSample_Release(output_sample); - ok(ret == 0, "Release returned %lu\n", ret); + ok(hr == S_OK || broken(hr == MF_E_SHUTDOWN) /* w8 */, "ProcessOutput returned %#lx\n", hr); + if (hr != S_OK) + { + win_skip("ProcessOutput returned MF_E_SHUTDOWN, skipping tests.\n"); + } + else + { + ok(output_status == 0, "got output[0].dwStatus %#lx\n", output_status); + + hr = IMFCollection_AddElement(output_samples, (IUnknown *)output_sample); + ok(hr == S_OK, "AddElement returned %#lx\n", hr); + ref = IMFSample_Release(output_sample); + ok(ref == 1, "Release returned %ld\n", ref); + + ret = check_mf_sample_collection(output_samples, &output_sample_desc, + video_processor_tests[i].result_bitmap); + todo_wine_if(i == 0 || i == 1) + ok(ret <= video_processor_tests[i].delta + /* w1064v1507 / w1064v1809 incorrectly rescale */ + || broken(ret == 25) || broken(ret == 32), + "got %lu%% diff\n", ret); + IMFCollection_Release(output_samples); + + output_sample = create_sample(NULL, output_info.cbSize); + hr = check_mft_process_output(transform, output_sample, &output_status); + ok(hr == MF_E_TRANSFORM_NEED_MORE_INPUT, "ProcessOutput returned %#lx\n", hr); + ok(output_status == 0, "got output[0].dwStatus %#lx\n", output_status); + hr = IMFSample_GetTotalLength(output_sample, &length); + ok(hr == S_OK, "GetTotalLength returned %#lx\n", hr); + ok(length == 0, "got length %lu\n", length); + } + ret = IMFSample_Release(output_sample); + ok(ret == 0, "Release returned %lu\n", ret); + winetest_pop_context(); + }
ret = IMFTransform_Release(transform); ok(ret == 0, "Release returned %ld\n", ret);
From: Eric Pouech eric.pouech@gmail.com
The app I'm considering opens a video_processor on its own, with an NV12 format as input and an ARGB32 format as output.
Tested on Windows: the samples are flipped vertically, while Wine leaves them untouched.
So this adds a videoflip element to the video processor, activated when needed. This patch depends on MR!2159.
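For reference, the GStreamer arrangement this ends up building for the flipped case can be reproduced standalone (illustrative only; the actual code creates the elements by hand inside wg_transform and picks the videoflip "method" at run time rather than using gst_parse_launch, and BGRA merely stands in for the ARGB32 output here):

/* Illustrative sketch: NV12 input, videoflip set to "vertical-flip", then
 * videoconvert to a 32-bit RGB output, roughly mirroring the wg_transform
 * pipeline this MR sets up. */
#include <gst/gst.h>

int main(int argc, char **argv)
{
    GError *error = NULL;
    GstElement *pipeline;

    gst_init(&argc, &argv);
    pipeline = gst_parse_launch(
        "videotestsrc num-buffers=30 ! video/x-raw,format=NV12"
        " ! videoflip method=vertical-flip ! videoconvert"
        " ! video/x-raw,format=BGRA ! fakesink sync=false", &error);
    if (!pipeline)
    {
        g_printerr("failed to build pipeline: %s\n", error->message);
        return 1;
    }
    gst_element_set_state(pipeline, GST_STATE_PLAYING);
    /* wait for the state change / preroll to complete */
    gst_element_get_state(pipeline, NULL, NULL, GST_CLOCK_TIME_NONE);
    gst_element_set_state(pipeline, GST_STATE_NULL);
    gst_object_unref(pipeline);
    return 0;
}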
Signed-off-by: Eric Pouech epouech@codeweavers.com
---
 dlls/mf/tests/transform.c          |  6 ++----
 dlls/winegstreamer/color_convert.c | 28 ++++++++++++++++++++++++++++
 dlls/winegstreamer/wg_transform.c  | 23 +++++++++++++++++++++++
 3 files changed, 53 insertions(+), 4 deletions(-)
diff --git a/dlls/mf/tests/transform.c b/dlls/mf/tests/transform.c index fb1b0f6c9c6..34a00526eab 100644 --- a/dlls/mf/tests/transform.c +++ b/dlls/mf/tests/transform.c @@ -5978,7 +5978,6 @@ static void test_color_convert(void) ok(ref == 1, "Release returned %ld\n", ref);
ret = check_mf_sample_collection(output_samples, &output_sample_desc, color_conversion_tests[i].result_bitmap); - todo_wine_if(i == 1) ok(ret <= color_conversion_tests[i].delta, "got %lu%% diff\n", ret); IMFCollection_Release(output_samples);
@@ -6223,7 +6222,7 @@ static void test_video_processor(void) .output_type_desc = output_type_desc, .expect_output_type_desc = output_type_desc, .result_bitmap = L"rgb32frame-vp.bmp", - .delta = 0, + .delta = 2, /* Windows returns 0, Wine needs 2 */ },
{ @@ -6231,7 +6230,7 @@ static void test_video_processor(void) .output_type_desc = output_type_desc_negative_stride, .expect_output_type_desc = output_type_desc_negative_stride, .result_bitmap = L"rgb32frame-vp.bmp", - .delta = 0, + .delta = 2, /* Windows returns 0, Wine needs 2 */ },
{ @@ -6623,7 +6622,6 @@ static void test_video_processor(void)
ret = check_mf_sample_collection(output_samples, &output_sample_desc, video_processor_tests[i].result_bitmap); - todo_wine_if(i == 0 || i == 1) ok(ret <= video_processor_tests[i].delta /* w1064v1507 / w1064v1809 incorrectly rescale */ || broken(ret == 25) || broken(ret == 32), diff --git a/dlls/winegstreamer/color_convert.c b/dlls/winegstreamer/color_convert.c index 0eaddc687ee..ed591c8e669 100644 --- a/dlls/winegstreamer/color_convert.c +++ b/dlls/winegstreamer/color_convert.c @@ -363,6 +363,7 @@ static HRESULT WINAPI transform_SetInputType(IMFTransform *iface, DWORD id, IMFM struct color_convert *impl = impl_from_IMFTransform(iface); GUID major, subtype; UINT64 frame_size; + UINT32 stride; HRESULT hr; ULONG i;
@@ -392,6 +393,19 @@ static HRESULT WINAPI transform_SetInputType(IMFTransform *iface, DWORD id, IMFM IMFMediaType_Release(impl->input_type); impl->input_type = NULL; } + if (FAILED(IMFMediaType_GetUINT32(impl->input_type, &MF_MT_DEFAULT_STRIDE, &stride))) + { + if (FAILED(hr = MFGetStrideForBitmapInfoHeader(subtype.Data1, frame_size >> 32, (LONG *)&stride))) + { + IMFMediaType_Release(impl->input_type); + impl->input_type = NULL; + } + if (FAILED(hr = IMFMediaType_SetUINT32(impl->input_type, &MF_MT_DEFAULT_STRIDE, abs((INT32)stride)))) + { + IMFMediaType_Release(impl->input_type); + impl->input_type = NULL; + } + }
if (impl->output_type && FAILED(hr = try_create_wg_transform(impl))) { @@ -411,6 +425,7 @@ static HRESULT WINAPI transform_SetOutputType(IMFTransform *iface, DWORD id, IMF struct color_convert *impl = impl_from_IMFTransform(iface); GUID major, subtype; UINT64 frame_size; + UINT32 stride; HRESULT hr; ULONG i;
@@ -440,6 +455,19 @@ static HRESULT WINAPI transform_SetOutputType(IMFTransform *iface, DWORD id, IMF IMFMediaType_Release(impl->output_type); impl->output_type = NULL; } + if (FAILED(IMFMediaType_GetUINT32(impl->output_type, &MF_MT_DEFAULT_STRIDE, &stride))) + { + if (FAILED(hr = MFGetStrideForBitmapInfoHeader(subtype.Data1, frame_size >> 32, (LONG *)&stride))) + { + IMFMediaType_Release(impl->output_type); + impl->output_type = NULL; + } + if (FAILED(hr = IMFMediaType_SetUINT32(impl->output_type, &MF_MT_DEFAULT_STRIDE, abs((INT32)stride)))) + { + IMFMediaType_Release(impl->output_type); + impl->output_type = NULL; + } + }
if (impl->input_type && FAILED(hr = try_create_wg_transform(impl))) { diff --git a/dlls/winegstreamer/wg_transform.c b/dlls/winegstreamer/wg_transform.c index 7755dac2523..2f21e601e0b 100644 --- a/dlls/winegstreamer/wg_transform.c +++ b/dlls/winegstreamer/wg_transform.c @@ -52,6 +52,9 @@ struct wg_transform guint input_max_length; GstAtomicQueue *input_queue;
+ bool input_is_flipped; + GstElement *video_flip; + guint output_plane_align; struct wg_sample *output_wg_sample; GstAtomicQueue *output_queue; @@ -262,6 +265,11 @@ static struct wg_sample *transform_request_sample(gsize size, void *context) return InterlockedExchangePointer((void **)&transform->output_wg_sample, NULL); }
+static bool wg_format_video_is_flipped(const struct wg_format *format) +{ + return format->major_type == WG_MAJOR_TYPE_VIDEO && (format->u.video.height < 0); +} + NTSTATUS wg_transform_create(void *args) { struct wg_transform_create_params *params = args; @@ -382,6 +390,12 @@ NTSTATUS wg_transform_create(void *args)
case WG_MAJOR_TYPE_VIDEO: case WG_MAJOR_TYPE_VIDEO_WMV: + if (!(transform->video_flip = create_element("videoflip", "base")) + || !append_element(transform->container, transform->video_flip, &first, &last)) + goto out; + transform->input_is_flipped = wg_format_video_is_flipped(&input_format); + if (transform->input_is_flipped != wg_format_video_is_flipped(&output_format)) + gst_util_set_object_arg(G_OBJECT(transform->video_flip), "method", "vertical-flip"); if (!(element = create_element("videoconvert", "base")) || !append_element(transform->container, element, &first, &last)) goto out; @@ -492,6 +506,15 @@ NTSTATUS wg_transform_set_output_format(void *args) gst_caps_unref(transform->output_caps); transform->output_caps = caps;
+ if (transform->video_flip) + { + const char *value; + if (transform->input_is_flipped != wg_format_video_is_flipped(format)) + value = "vertical-flip"; + else + value = "none"; + gst_util_set_object_arg(G_OBJECT(transform->video_flip), "method", value); + } if (!gst_pad_push_event(transform->my_sink, gst_event_new_reconfigure())) { GST_ERROR("Failed to reconfigure transform %p.", transform);
V6:
- rebased after merge of MR:2640
- included Rémi's latest comments
This merge request was approved by Rémi Bernon.
This merge request was approved by Zebediah Figura.