The app I'm considering opens a video_processor on its own, with an NV12 format on input and an ARGB32 format on output.
Tested on Windows: the output samples are flipped vertically, while Wine keeps them untouched.
So this adds a videoflip element in the video processor, to be activated when needed. Current activation is based on RGB vs. non-RGB input/output formats.
Set as draft as it's somehow related to MR!2159. Comments welcome.
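For reference, the activation roughly boils down to the sketch below, condensed from the wg_transform changes in the second patch (element creation error handling omitted; a video format counts as flipped when its height is negative):

```c
/* Condensed sketch of the flip decision (error handling omitted). A video
 * format is considered flipped when its height is negative (bottom-up RGB). */
static bool wg_format_video_is_flipped(const struct wg_format *format)
{
    return format->major_type == WG_MAJOR_TYPE_VIDEO && format->u.video.height < 0;
}

/* In wg_transform_create(): always insert a videoflip element in the pipeline,
 * and only switch it to "vertical-flip" when the input and output orientations
 * disagree. wg_transform_set_output_format() resets the "method" property the
 * same way when the output format changes. */
transform->video_flip = create_element("videoflip", "base");
transform_append_element(transform, transform->video_flip, &first, &last);
transform->input_is_flipped = wg_format_video_is_flipped(&input_format);
if (transform->input_is_flipped != wg_format_video_is_flipped(&output_format))
    gst_util_set_object_arg(G_OBJECT(transform->video_flip), "method", "vertical-flip");
```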
Signed-off-by: Eric Pouech epouech@codeweavers.com
--
v7: winegstreamer: In video_processor, activate a videoflip converter.
    mf/tests: Add tests about (negative) stride handling.
From: Eric Pouech epouech@codeweavers.com
Signed-off-by: Eric Pouech epouech@codeweavers.com
---
 dlls/mf/tests/transform.c | 579 ++++++++++++++++++++++++++++----------
 1 file changed, 434 insertions(+), 145 deletions(-)
diff --git a/dlls/mf/tests/transform.c b/dlls/mf/tests/transform.c index b41f87afabe..fb1b0f6c9c6 100644 --- a/dlls/mf/tests/transform.c +++ b/dlls/mf/tests/transform.c @@ -4971,6 +4971,37 @@ static void test_wmv_decoder(void) ATTR_RATIO(MF_MT_FRAME_SIZE, actual_width, actual_height, .required = TRUE), {0}, }; + const struct attribute_desc output_type_desc_negative_stride[] = + { + ATTR_GUID(MF_MT_MAJOR_TYPE, MFMediaType_Video, .required = TRUE), + ATTR_GUID(MF_MT_SUBTYPE, MFVideoFormat_NV12, .required = TRUE), + ATTR_RATIO(MF_MT_FRAME_SIZE, actual_width, actual_height, .required = TRUE), + ATTR_UINT32(MF_MT_DEFAULT_STRIDE, -actual_width), + {0}, + }; + const struct attribute_desc output_type_desc_rgb[] = + { + ATTR_GUID(MF_MT_MAJOR_TYPE, MFMediaType_Video, .required = TRUE), + ATTR_GUID(MF_MT_SUBTYPE, MFVideoFormat_RGB32, .required = TRUE), + ATTR_RATIO(MF_MT_FRAME_SIZE, actual_width, actual_height, .required = TRUE), + {0}, + }; + const struct attribute_desc output_type_desc_rgb_negative_stride[] = + { + ATTR_GUID(MF_MT_MAJOR_TYPE, MFMediaType_Video, .required = TRUE), + ATTR_GUID(MF_MT_SUBTYPE, MFVideoFormat_RGB32, .required = TRUE), + ATTR_RATIO(MF_MT_FRAME_SIZE, actual_width, actual_height, .required = TRUE), + ATTR_UINT32(MF_MT_DEFAULT_STRIDE, -actual_width * 4), + {0}, + }; + const struct attribute_desc output_type_desc_rgb_positive_stride[] = + { + ATTR_GUID(MF_MT_MAJOR_TYPE, MFMediaType_Video, .required = TRUE), + ATTR_GUID(MF_MT_SUBTYPE, MFVideoFormat_RGB32, .required = TRUE), + ATTR_RATIO(MF_MT_FRAME_SIZE, actual_width, actual_height, .required = TRUE), + ATTR_UINT32(MF_MT_DEFAULT_STRIDE, actual_width * 4), + {0}, + }; const struct attribute_desc expect_input_type_desc[] = { ATTR_GUID(MF_MT_MAJOR_TYPE, MFMediaType_Video), @@ -4992,12 +5023,44 @@ static void test_wmv_decoder(void) ATTR_UINT32(MF_MT_ALL_SAMPLES_INDEPENDENT, 1), {0}, }; + const struct attribute_desc expect_output_type_desc_rgb[] = + { + ATTR_GUID(MF_MT_MAJOR_TYPE, MFMediaType_Video), + ATTR_GUID(MF_MT_SUBTYPE, MFVideoFormat_RGB32), + ATTR_RATIO(MF_MT_FRAME_SIZE, actual_width, actual_height), + ATTR_UINT32(MF_MT_DEFAULT_STRIDE, actual_width * 4), + ATTR_UINT32(MF_MT_SAMPLE_SIZE, actual_width * actual_height * 4), + ATTR_UINT32(MF_MT_FIXED_SIZE_SAMPLES, 1), + ATTR_UINT32(MF_MT_VIDEO_NOMINAL_RANGE, 2), + ATTR_RATIO(MF_MT_PIXEL_ASPECT_RATIO, 1, 1), + ATTR_UINT32(MF_MT_ALL_SAMPLES_INDEPENDENT, 1), + {0}, + }; + const struct attribute_desc expect_output_type_desc_rgb_negative_stride[] = + { + ATTR_GUID(MF_MT_MAJOR_TYPE, MFMediaType_Video), + ATTR_GUID(MF_MT_SUBTYPE, MFVideoFormat_RGB32), + ATTR_RATIO(MF_MT_FRAME_SIZE, actual_width, actual_height), + ATTR_UINT32(MF_MT_DEFAULT_STRIDE, -actual_width * 4), + ATTR_UINT32(MF_MT_SAMPLE_SIZE, actual_width * actual_height * 4), + ATTR_UINT32(MF_MT_FIXED_SIZE_SAMPLES, 1), + ATTR_UINT32(MF_MT_VIDEO_NOMINAL_RANGE, 2), + ATTR_RATIO(MF_MT_PIXEL_ASPECT_RATIO, 1, 1), + ATTR_UINT32(MF_MT_ALL_SAMPLES_INDEPENDENT, 1), + {0}, + }; const MFT_OUTPUT_STREAM_INFO expect_output_info = { .dwFlags = MFT_OUTPUT_STREAM_WHOLE_SAMPLES | MFT_OUTPUT_STREAM_SINGLE_SAMPLE_PER_BUFFER | MFT_OUTPUT_STREAM_DISCARDABLE, .cbSize = 0x3600, .cbAlignment = 1, }; + const MFT_OUTPUT_STREAM_INFO expect_output_info_rgb = + { + .dwFlags = MFT_OUTPUT_STREAM_WHOLE_SAMPLES | MFT_OUTPUT_STREAM_SINGLE_SAMPLE_PER_BUFFER | MFT_OUTPUT_STREAM_DISCARDABLE, + .cbSize = 0x9000, + .cbAlignment = 1, + }; const MFT_OUTPUT_STREAM_INFO empty_output_info = { .dwFlags = MFT_OUTPUT_STREAM_WHOLE_SAMPLES | 
MFT_OUTPUT_STREAM_SINGLE_SAMPLE_PER_BUFFER | MFT_OUTPUT_STREAM_DISCARDABLE, @@ -5007,6 +5070,11 @@ static void test_wmv_decoder(void) .cbSize = 0x3600, .cbAlignment = 1, }; + const MFT_INPUT_STREAM_INFO expect_input_info_rgb = + { + .cbSize = 0x9000, + .cbAlignment = 1, + };
const struct attribute_desc output_sample_attributes[] = { @@ -5018,12 +5086,92 @@ static void test_wmv_decoder(void) .length = actual_width * actual_height * 3 / 2, .compare = compare_nv12, .dump = dump_nv12, .rect = {.right = 82, .bottom = 84}, }; + const struct buffer_desc output_buffer_desc_rgb = + { + .length = actual_width * actual_height * 4, + .compare = compare_rgb32, .dump = dump_rgb32, .rect = {.right = 82, .bottom = 84}, + }; const struct sample_desc output_sample_desc_nv12 = { .attributes = output_sample_attributes, .sample_time = 0, .sample_duration = 333333, .buffer_count = 1, .buffers = &output_buffer_desc_nv12, }; + const struct sample_desc output_sample_desc_rgb = + { + .attributes = output_sample_attributes, + .sample_time = 0, .sample_duration = 333333, + .buffer_count = 1, .buffers = &output_buffer_desc_rgb, + }; + + const struct transform_desc + { + const struct attribute_desc *output_type_desc; + const struct attribute_desc *expect_output_type_desc; + const MFT_INPUT_STREAM_INFO *expect_input_info; + const MFT_OUTPUT_STREAM_INFO *expect_output_info; + const struct sample_desc *output_sample_desc; + const WCHAR *result_bitmap; + ULONG delta; + } + transform_tests[] = + { + { + /* WMV1 -> YUV */ + .output_type_desc = output_type_desc, + .expect_output_type_desc = expect_output_type_desc, + .expect_input_info = &expect_input_info, + .expect_output_info = &expect_output_info, + .output_sample_desc = &output_sample_desc_nv12, + .result_bitmap = L"nv12frame.bmp", + .delta = 0, + }, + + { + /* WMV1 -> YUV (negative stride) */ + .output_type_desc = output_type_desc_negative_stride, + .expect_output_type_desc = expect_output_type_desc, + .expect_input_info = &expect_input_info, + .expect_output_info = &expect_output_info, + .output_sample_desc = &output_sample_desc_nv12, + .result_bitmap = L"nv12frame.bmp", + .delta = 0, + }, + + { + /* WMV1 -> RGB */ + .output_type_desc = output_type_desc_rgb, + .expect_output_type_desc = expect_output_type_desc_rgb, + .expect_input_info = &expect_input_info_rgb, + .expect_output_info = &expect_output_info_rgb, + .output_sample_desc = &output_sample_desc_rgb, + .result_bitmap = L"rgb32frame-vp.bmp", + .delta = 5, + }, + + { + /* WMV1 -> RGB (negative stride) */ + .output_type_desc = output_type_desc_rgb_negative_stride, + .expect_output_type_desc = expect_output_type_desc_rgb_negative_stride, + .expect_input_info = &expect_input_info_rgb, + .expect_output_info = &expect_output_info_rgb, + .output_sample_desc = &output_sample_desc_rgb, + .result_bitmap = L"rgb32frame-vp.bmp", + .delta = 5, + }, + + { + /* WMV1 -> RGB (positive stride */ + .output_type_desc = output_type_desc_rgb_positive_stride, + .expect_output_type_desc = expect_output_type_desc_rgb, + .expect_input_info = &expect_input_info_rgb, + .expect_output_info = &expect_output_info_rgb, + .output_sample_desc = &output_sample_desc_rgb, + .result_bitmap = L"rgb32frame-vp.bmp", + .delta = 5, + }, + + };
MFT_REGISTER_TYPE_INFO output_type = {MFMediaType_Video, MFVideoFormat_NV12}; MFT_REGISTER_TYPE_INFO input_type = {MFMediaType_Video, MFVideoFormat_WMV1}; @@ -5034,7 +5182,7 @@ static void test_wmv_decoder(void) const BYTE *wmvenc_data; ULONG wmvenc_data_len; DWORD output_status; - ULONG i, ret, ref; + ULONG i, j, ret, ref; HRESULT hr;
hr = CoInitialize(NULL); @@ -5114,50 +5262,57 @@ static void test_wmv_decoder(void) ok(hr == MF_E_NO_MORE_TYPES, "GetOutputAvailableType returned %#lx\n", hr); ok(i == ARRAY_SIZE(expect_available_outputs), "%lu input media types\n", i);
- check_mft_set_output_type_required(transform, output_type_desc); - check_mft_set_output_type(transform, output_type_desc, S_OK); - check_mft_get_output_current_type_(transform, expect_output_type_desc, FALSE, TRUE); + for (j = 0; j < ARRAY_SIZE(transform_tests); j++) + { + winetest_push_context("transform #%lu", j);
- check_mft_get_input_stream_info(transform, S_OK, &expect_input_info); - check_mft_get_output_stream_info(transform, S_OK, &expect_output_info); + check_mft_set_output_type_required(transform, transform_tests[j].output_type_desc); + check_mft_set_output_type(transform, transform_tests[j].output_type_desc, S_OK); + check_mft_get_output_current_type_(transform, transform_tests[j].expect_output_type_desc, FALSE, TRUE);
- load_resource(L"wmvencdata.bin", &wmvenc_data, &wmvenc_data_len); + check_mft_get_input_stream_info(transform, S_OK, transform_tests[j].expect_input_info); + check_mft_get_output_stream_info(transform, S_OK, transform_tests[j].expect_output_info);
- input_sample = create_sample(wmvenc_data + sizeof(DWORD), *(DWORD *)wmvenc_data); - wmvenc_data_len -= *(DWORD *)wmvenc_data + sizeof(DWORD); - wmvenc_data += *(DWORD *)wmvenc_data + sizeof(DWORD); - hr = IMFSample_SetSampleTime(input_sample, 0); - ok(hr == S_OK, "SetSampleTime returned %#lx\n", hr); - hr = IMFSample_SetSampleDuration(input_sample, 333333); - ok(hr == S_OK, "SetSampleDuration returned %#lx\n", hr); - hr = IMFTransform_ProcessInput(transform, 0, input_sample, 0); - ok(hr == S_OK, "ProcessInput returned %#lx\n", hr); - ret = IMFSample_Release(input_sample); - ok(ret <= 1, "Release returned %ld\n", ret); + load_resource(L"wmvencdata.bin", &wmvenc_data, &wmvenc_data_len);
- hr = MFCreateCollection(&output_samples); - ok(hr == S_OK, "MFCreateCollection returned %#lx\n", hr); + input_sample = create_sample(wmvenc_data + sizeof(DWORD), *(DWORD *)wmvenc_data); + wmvenc_data_len -= *(DWORD *)wmvenc_data + sizeof(DWORD); + wmvenc_data += *(DWORD *)wmvenc_data + sizeof(DWORD); + hr = IMFSample_SetSampleTime(input_sample, 0); + ok(hr == S_OK, "SetSampleTime returned %#lx\n", hr); + hr = IMFSample_SetSampleDuration(input_sample, 333333); + ok(hr == S_OK, "SetSampleDuration returned %#lx\n", hr); + hr = IMFTransform_ProcessInput(transform, 0, input_sample, 0); + ok(hr == S_OK, "ProcessInput returned %#lx\n", hr); + ret = IMFSample_Release(input_sample); + ok(ret <= 1, "Release returned %ld\n", ret);
- output_sample = create_sample(NULL, expect_output_info.cbSize); - for (i = 0; SUCCEEDED(hr = check_mft_process_output(transform, output_sample, &output_status)); i++) - { - winetest_push_context("%lu", i); - ok(hr == S_OK, "ProcessOutput returned %#lx\n", hr); - hr = IMFCollection_AddElement(output_samples, (IUnknown *)output_sample); - ok(hr == S_OK, "AddElement returned %#lx\n", hr); - ref = IMFSample_Release(output_sample); - ok(ref == 1, "Release returned %ld\n", ref); - output_sample = create_sample(NULL, expect_output_info.cbSize); + hr = MFCreateCollection(&output_samples); + ok(hr == S_OK, "MFCreateCollection returned %#lx\n", hr); + + output_sample = create_sample(NULL, transform_tests[j].expect_output_info->cbSize); + for (i = 0; SUCCEEDED(hr = check_mft_process_output(transform, output_sample, &output_status)); i++) + { + winetest_push_context("%lu", i); + ok(hr == S_OK, "ProcessOutput returned %#lx\n", hr); + hr = IMFCollection_AddElement(output_samples, (IUnknown *)output_sample); + ok(hr == S_OK, "AddElement returned %#lx\n", hr); + ref = IMFSample_Release(output_sample); + ok(ref == 1, "Release returned %ld\n", ref); + output_sample = create_sample(NULL, transform_tests[j].expect_output_info->cbSize); + winetest_pop_context(); + } + ok(hr == MF_E_TRANSFORM_NEED_MORE_INPUT, "ProcessOutput returned %#lx\n", hr); + ret = IMFSample_Release(output_sample); + ok(ret == 0, "Release returned %lu\n", ret); + ok(i == 1, "got %lu output samples\n", i); + + ret = check_mf_sample_collection(output_samples, transform_tests[j].output_sample_desc, + transform_tests[j].result_bitmap); + ok(ret <= transform_tests[j].delta, "got %lu%% diff\n", ret); + IMFCollection_Release(output_samples); winetest_pop_context(); } - ok(hr == MF_E_TRANSFORM_NEED_MORE_INPUT, "ProcessOutput returned %#lx\n", hr); - ret = IMFSample_Release(output_sample); - ok(ret == 0, "Release returned %lu\n", ret); - ok(i == 1, "got %lu output samples\n", i); - - ret = check_mf_sample_collection(output_samples, &output_sample_desc_nv12, L"nv12frame.bmp"); - ok(ret == 0, "got %lu%% diff\n", ret); - IMFCollection_Release(output_samples);
skip_tests: ret = IMFTransform_Release(transform); @@ -5591,6 +5746,22 @@ static void test_color_convert(void) ATTR_RATIO(MF_MT_FRAME_SIZE, actual_width, actual_height, .required = TRUE), {0}, }; + const struct attribute_desc output_type_desc_negative_stride[] = + { + ATTR_GUID(MF_MT_MAJOR_TYPE, MFMediaType_Video, .required = TRUE), + ATTR_GUID(MF_MT_SUBTYPE, MFVideoFormat_RGB32, .required = TRUE), + ATTR_RATIO(MF_MT_FRAME_SIZE, actual_width, actual_height, .required = TRUE), + ATTR_UINT32(MF_MT_DEFAULT_STRIDE, -actual_width * 4), + {0}, + }; + const struct attribute_desc output_type_desc_positive_stride[] = + { + ATTR_GUID(MF_MT_MAJOR_TYPE, MFMediaType_Video, .required = TRUE), + ATTR_GUID(MF_MT_SUBTYPE, MFVideoFormat_RGB32, .required = TRUE), + ATTR_RATIO(MF_MT_FRAME_SIZE, actual_width, actual_height, .required = TRUE), + ATTR_UINT32(MF_MT_DEFAULT_STRIDE, actual_width * 4), + {0}, + }; const struct attribute_desc expect_input_type_desc[] = { ATTR_GUID(MF_MT_MAJOR_TYPE, MFMediaType_Video), @@ -5616,6 +5787,18 @@ static void test_color_convert(void) ATTR_RATIO(MF_MT_PIXEL_ASPECT_RATIO, 1, 1), {0}, }; + const struct attribute_desc expect_output_type_desc_negative_stride[] = + { + ATTR_GUID(MF_MT_MAJOR_TYPE, MFMediaType_Video), + ATTR_GUID(MF_MT_SUBTYPE, MFVideoFormat_RGB32), + ATTR_RATIO(MF_MT_FRAME_SIZE, actual_width, actual_height), + ATTR_UINT32(MF_MT_DEFAULT_STRIDE, -actual_width * 4), + ATTR_UINT32(MF_MT_SAMPLE_SIZE, actual_width * actual_height * 4), + ATTR_UINT32(MF_MT_ALL_SAMPLES_INDEPENDENT, 1), + ATTR_UINT32(MF_MT_FIXED_SIZE_SAMPLES, 1), + ATTR_RATIO(MF_MT_PIXEL_ASPECT_RATIO, 1, 1), + {0}, + }; const MFT_OUTPUT_STREAM_INFO output_info = { .cbSize = actual_width * actual_height * 4, @@ -5643,6 +5826,41 @@ static void test_color_convert(void) .sample_time = 0, .sample_duration = 10000000, .buffer_count = 1, .buffers = &output_buffer_desc, }; + const struct transform_desc + { + const struct attribute_desc *output_type_desc; + const struct attribute_desc *expect_output_type_desc; + const WCHAR *result_bitmap; + ULONG delta; + } + color_conversion_tests[] = + { + + { + /* YUV -> RGB */ + .output_type_desc = output_type_desc, + .expect_output_type_desc = expect_output_type_desc, + .result_bitmap = L"rgb32frame.bmp", + .delta = 4, /* Windows return 0, Wine needs 4 */ + }, + + { + /* YUV -> RGB (negative stride) */ + .output_type_desc = output_type_desc_negative_stride, + .expect_output_type_desc = expect_output_type_desc_negative_stride, + .result_bitmap = L"rgb32frame-vp.bmp", + .delta = 6, + }, + + { + /* YUV -> RGB (positive stride) */ + .output_type_desc = output_type_desc_positive_stride, + .expect_output_type_desc = expect_output_type_desc, + .result_bitmap = L"rgb32frame.bmp", + .delta = 4, /* Windows return 0, Wine needs 4 */ + }, + + };
MFT_REGISTER_TYPE_INFO output_type = {MFMediaType_Video, MFVideoFormat_NV12}; MFT_REGISTER_TYPE_INFO input_type = {MFMediaType_Video, MFVideoFormat_I420}; @@ -5712,63 +5930,69 @@ static void test_color_convert(void) ok(hr == MF_E_NO_MORE_TYPES, "GetInputAvailableType returned %#lx\n", hr); ok(i == 20, "%lu input media types\n", i);
- check_mft_set_output_type_required(transform, output_type_desc); - check_mft_set_output_type(transform, output_type_desc, S_OK); - check_mft_get_output_current_type_(transform, expect_output_type_desc, FALSE, TRUE); - check_mft_set_input_type_required(transform, input_type_desc); check_mft_set_input_type(transform, input_type_desc); check_mft_get_input_current_type_(transform, expect_input_type_desc, FALSE, TRUE);
- check_mft_get_input_stream_info(transform, S_OK, &input_info); - check_mft_get_output_stream_info(transform, S_OK, &output_info); + for (i = 0; i < ARRAY_SIZE(color_conversion_tests); i++) + { + winetest_push_context("color conversion #%lu", i); + check_mft_set_output_type_required(transform, color_conversion_tests[i].output_type_desc); + check_mft_set_output_type(transform, color_conversion_tests[i].output_type_desc, S_OK); + check_mft_get_output_current_type_(transform, color_conversion_tests[i].expect_output_type_desc, FALSE, TRUE);
- load_resource(L"nv12frame.bmp", &nv12frame_data, &nv12frame_data_len); - /* skip BMP header and RGB data from the dump */ - length = *(DWORD *)(nv12frame_data + 2); - nv12frame_data_len = nv12frame_data_len - length; - nv12frame_data = nv12frame_data + length; - ok(nv12frame_data_len == 13824, "got length %lu\n", nv12frame_data_len); + check_mft_get_input_stream_info(transform, S_OK, &input_info); + check_mft_get_output_stream_info(transform, S_OK, &output_info);
- input_sample = create_sample(nv12frame_data, nv12frame_data_len); - hr = IMFSample_SetSampleTime(input_sample, 0); - ok(hr == S_OK, "SetSampleTime returned %#lx\n", hr); - hr = IMFSample_SetSampleDuration(input_sample, 10000000); - ok(hr == S_OK, "SetSampleDuration returned %#lx\n", hr); - hr = IMFTransform_ProcessInput(transform, 0, input_sample, 0); - ok(hr == S_OK, "ProcessInput returned %#lx\n", hr); - hr = IMFTransform_ProcessInput(transform, 0, input_sample, 0); - ok(hr == MF_E_NOTACCEPTING, "ProcessInput returned %#lx\n", hr); - hr = IMFTransform_ProcessMessage(transform, MFT_MESSAGE_COMMAND_DRAIN, 0); - ok(hr == S_OK, "ProcessMessage returned %#lx\n", hr); - ret = IMFSample_Release(input_sample); - ok(ret <= 1, "Release returned %ld\n", ret); + load_resource(L"nv12frame.bmp", &nv12frame_data, &nv12frame_data_len); + /* skip BMP header and RGB data from the dump */ + length = *(DWORD *)(nv12frame_data + 2); + nv12frame_data_len = nv12frame_data_len - length; + nv12frame_data = nv12frame_data + length; + ok(nv12frame_data_len == 13824, "got length %lu\n", nv12frame_data_len);
- hr = MFCreateCollection(&output_samples); - ok(hr == S_OK, "MFCreateCollection returned %#lx\n", hr); + input_sample = create_sample(nv12frame_data, nv12frame_data_len); + hr = IMFSample_SetSampleTime(input_sample, 0); + ok(hr == S_OK, "SetSampleTime returned %#lx\n", hr); + hr = IMFSample_SetSampleDuration(input_sample, 10000000); + ok(hr == S_OK, "SetSampleDuration returned %#lx\n", hr); + hr = IMFTransform_ProcessInput(transform, 0, input_sample, 0); + ok(hr == S_OK, "ProcessInput returned %#lx\n", hr); + hr = IMFTransform_ProcessInput(transform, 0, input_sample, 0); + ok(hr == MF_E_NOTACCEPTING, "ProcessInput returned %#lx\n", hr); + hr = IMFTransform_ProcessMessage(transform, MFT_MESSAGE_COMMAND_DRAIN, 0); + ok(hr == S_OK, "ProcessMessage returned %#lx\n", hr); + ret = IMFSample_Release(input_sample); + ok(ret <= 1, "Release returned %ld\n", ret);
- output_sample = create_sample(NULL, output_info.cbSize); - hr = check_mft_process_output(transform, output_sample, &output_status); - ok(hr == S_OK, "ProcessOutput returned %#lx\n", hr); - ok(output_status == 0, "got output[0].dwStatus %#lx\n", output_status); - hr = IMFCollection_AddElement(output_samples, (IUnknown *)output_sample); - ok(hr == S_OK, "AddElement returned %#lx\n", hr); - ref = IMFSample_Release(output_sample); - ok(ref == 1, "Release returned %ld\n", ref); + hr = MFCreateCollection(&output_samples); + ok(hr == S_OK, "MFCreateCollection returned %#lx\n", hr);
- ret = check_mf_sample_collection(output_samples, &output_sample_desc, L"rgb32frame.bmp"); - ok(ret <= 4 /* small and harmless diff in Wine vs Windows */, "got %lu%% diff\n", ret); - IMFCollection_Release(output_samples); + output_sample = create_sample(NULL, output_info.cbSize); + hr = check_mft_process_output(transform, output_sample, &output_status); + ok(hr == S_OK, "ProcessOutput returned %#lx\n", hr); + ok(output_status == 0, "got output[0].dwStatus %#lx\n", output_status); + hr = IMFCollection_AddElement(output_samples, (IUnknown *)output_sample); + ok(hr == S_OK, "AddElement returned %#lx\n", hr); + ref = IMFSample_Release(output_sample); + ok(ref == 1, "Release returned %ld\n", ref);
- output_sample = create_sample(NULL, output_info.cbSize); - hr = check_mft_process_output(transform, output_sample, &output_status); - ok(hr == MF_E_TRANSFORM_NEED_MORE_INPUT, "ProcessOutput returned %#lx\n", hr); - ok(output_status == 0, "got output[0].dwStatus %#lx\n", output_status); - hr = IMFSample_GetTotalLength(output_sample, &length); - ok(hr == S_OK, "GetTotalLength returned %#lx\n", hr); - ok(length == 0, "got length %lu\n", length); - ret = IMFSample_Release(output_sample); - ok(ret == 0, "Release returned %lu\n", ret); + ret = check_mf_sample_collection(output_samples, &output_sample_desc, color_conversion_tests[i].result_bitmap); + todo_wine_if(i == 1) + ok(ret <= color_conversion_tests[i].delta, "got %lu%% diff\n", ret); + IMFCollection_Release(output_samples); + + output_sample = create_sample(NULL, output_info.cbSize); + hr = check_mft_process_output(transform, output_sample, &output_status); + ok(hr == MF_E_TRANSFORM_NEED_MORE_INPUT, "ProcessOutput returned %#lx\n", hr); + ok(output_status == 0, "got output[0].dwStatus %#lx\n", output_status); + hr = IMFSample_GetTotalLength(output_sample, &length); + ok(hr == S_OK, "GetTotalLength returned %#lx\n", hr); + ok(length == 0, "got length %lu\n", length); + ret = IMFSample_Release(output_sample); + ok(ret == 0, "Release returned %lu\n", ret); + winetest_pop_context(); + }
ret = IMFTransform_Release(transform); ok(ret == 0, "Release returned %ld\n", ret); @@ -5944,6 +6168,24 @@ static void test_video_processor(void) ATTR_BLOB(MF_MT_MINIMUM_DISPLAY_APERTURE, &actual_aperture, 16), {0}, }; + const struct attribute_desc output_type_desc_negative_stride[] = + { + ATTR_GUID(MF_MT_MAJOR_TYPE, MFMediaType_Video, .required = TRUE), + ATTR_GUID(MF_MT_SUBTYPE, MFVideoFormat_RGB32, .required = TRUE), + ATTR_RATIO(MF_MT_FRAME_SIZE, actual_width, actual_height, .required = TRUE), + ATTR_BLOB(MF_MT_MINIMUM_DISPLAY_APERTURE, &actual_aperture, 16), + ATTR_UINT32(MF_MT_DEFAULT_STRIDE, -actual_width * 4), + {0}, + }; + const struct attribute_desc output_type_desc_positive_stride[] = + { + ATTR_GUID(MF_MT_MAJOR_TYPE, MFMediaType_Video, .required = TRUE), + ATTR_GUID(MF_MT_SUBTYPE, MFVideoFormat_RGB32, .required = TRUE), + ATTR_RATIO(MF_MT_FRAME_SIZE, actual_width, actual_height, .required = TRUE), + ATTR_BLOB(MF_MT_MINIMUM_DISPLAY_APERTURE, &actual_aperture, 16), + ATTR_UINT32(MF_MT_DEFAULT_STRIDE, actual_width * 4), + {0}, + }; const MFT_OUTPUT_STREAM_INFO initial_output_info = {0}; const MFT_INPUT_STREAM_INFO initial_input_info = {0}; MFT_OUTPUT_STREAM_INFO output_info = {0}; @@ -5966,6 +6208,42 @@ static void test_video_processor(void) .buffer_count = 1, .buffers = &output_buffer_desc, };
+ const struct transform_desc + { + const struct attribute_desc *output_type_desc; + const struct attribute_desc *expect_output_type_desc; + const WCHAR *result_bitmap; + ULONG delta; + } + video_processor_tests[] = + { + + { + /* YUV -> RGB */ + .output_type_desc = output_type_desc, + .expect_output_type_desc = output_type_desc, + .result_bitmap = L"rgb32frame-vp.bmp", + .delta = 0, + }, + + { + /* YUV -> RGB (negative stride) */ + .output_type_desc = output_type_desc_negative_stride, + .expect_output_type_desc = output_type_desc_negative_stride, + .result_bitmap = L"rgb32frame-vp.bmp", + .delta = 0, + }, + + { + /* YUV -> RGB (positive stride) */ + .output_type_desc = output_type_desc_positive_stride, + .expect_output_type_desc = output_type_desc_positive_stride, + .result_bitmap = L"rgb32frame.bmp", + .delta = 6, + }, + + }; + MFT_REGISTER_TYPE_INFO output_type = {MFMediaType_Video, MFVideoFormat_NV12}; MFT_REGISTER_TYPE_INFO input_type = {MFMediaType_Video, MFVideoFormat_I420}; DWORD i, j, k, flags, length, output_status; @@ -6285,74 +6563,85 @@ static void test_video_processor(void) ok(hr == MF_E_NO_MORE_TYPES, "GetInputAvailableType returned %#lx\n", hr); ok(i == 22 || i == 30 || broken(i == 26) /* w1064v1507 */, "%lu input media types\n", i);
- check_mft_set_input_type_required(transform, input_type_desc); - check_mft_set_input_type(transform, input_type_desc); - check_mft_get_input_current_type(transform, input_type_desc); - - check_mft_set_output_type_required(transform, output_type_desc); - check_mft_set_output_type(transform, output_type_desc, S_OK); - check_mft_get_output_current_type(transform, output_type_desc); - - input_info.cbSize = actual_width * actual_height * 3 / 2; - output_info.cbSize = actual_width * actual_height * 4; - check_mft_get_input_stream_info(transform, S_OK, &input_info); - check_mft_get_output_stream_info(transform, S_OK, &output_info); + for (i = 0; i < ARRAY_SIZE(video_processor_tests); i++) + { + winetest_push_context("transform #%lu", i);
- load_resource(L"nv12frame.bmp", &nv12frame_data, &nv12frame_data_len); - /* skip BMP header and RGB data from the dump */ - length = *(DWORD *)(nv12frame_data + 2); - nv12frame_data_len = nv12frame_data_len - length; - nv12frame_data = nv12frame_data + length; - ok(nv12frame_data_len == 13824, "got length %lu\n", nv12frame_data_len); + check_mft_set_input_type_required(transform, input_type_desc); + check_mft_set_input_type(transform, input_type_desc); + check_mft_get_input_current_type(transform, input_type_desc);
- input_sample = create_sample(nv12frame_data, nv12frame_data_len); - hr = IMFSample_SetSampleTime(input_sample, 0); - ok(hr == S_OK, "SetSampleTime returned %#lx\n", hr); - hr = IMFSample_SetSampleDuration(input_sample, 10000000); - ok(hr == S_OK, "SetSampleDuration returned %#lx\n", hr); - hr = IMFTransform_ProcessInput(transform, 0, input_sample, 0); - ok(hr == S_OK, "ProcessInput returned %#lx\n", hr); - hr = IMFTransform_ProcessInput(transform, 0, input_sample, 0); - ok(hr == MF_E_NOTACCEPTING, "ProcessInput returned %#lx\n", hr); - hr = IMFTransform_ProcessMessage(transform, MFT_MESSAGE_COMMAND_DRAIN, 0); - ok(hr == S_OK, "ProcessMessage returned %#lx\n", hr); - ret = IMFSample_Release(input_sample); - ok(ret <= 1, "Release returned %ld\n", ret); + check_mft_set_output_type_required(transform, video_processor_tests[i].output_type_desc); + check_mft_set_output_type(transform, video_processor_tests[i].output_type_desc, S_OK); + check_mft_get_output_current_type(transform, video_processor_tests[i].output_type_desc);
- hr = MFCreateCollection(&output_samples); - ok(hr == S_OK, "MFCreateCollection returned %#lx\n", hr); + input_info.cbSize = actual_width * actual_height * 3 / 2; + output_info.cbSize = actual_width * actual_height * 4; + check_mft_get_input_stream_info(transform, S_OK, &input_info); + check_mft_get_output_stream_info(transform, S_OK, &output_info);
- output_sample = create_sample(NULL, output_info.cbSize); - hr = check_mft_process_output(transform, output_sample, &output_status); - ok(hr == S_OK || broken(hr == MF_E_SHUTDOWN) /* w8 */, "ProcessOutput returned %#lx\n", hr); - if (hr != S_OK) - { - win_skip("ProcessOutput returned MF_E_SHUTDOWN, skipping tests.\n"); - goto skip_output; - } - ok(output_status == 0, "got output[0].dwStatus %#lx\n", output_status); + load_resource(L"nv12frame.bmp", &nv12frame_data, &nv12frame_data_len); + /* skip BMP header and RGB data from the dump */ + length = *(DWORD *)(nv12frame_data + 2); + nv12frame_data_len = nv12frame_data_len - length; + nv12frame_data = nv12frame_data + length; + ok(nv12frame_data_len == 13824, "got length %lu\n", nv12frame_data_len);
- hr = IMFCollection_AddElement(output_samples, (IUnknown *)output_sample); - ok(hr == S_OK, "AddElement returned %#lx\n", hr); - ref = IMFSample_Release(output_sample); - ok(ref == 1, "Release returned %ld\n", ref); + input_sample = create_sample(nv12frame_data, nv12frame_data_len); + hr = IMFSample_SetSampleTime(input_sample, 0); + ok(hr == S_OK, "SetSampleTime returned %#lx\n", hr); + hr = IMFSample_SetSampleDuration(input_sample, 10000000); + ok(hr == S_OK, "SetSampleDuration returned %#lx\n", hr); + hr = IMFTransform_ProcessInput(transform, 0, input_sample, 0); + ok(hr == S_OK, "ProcessInput returned %#lx\n", hr); + hr = IMFTransform_ProcessInput(transform, 0, input_sample, 0); + ok(hr == MF_E_NOTACCEPTING, "ProcessInput returned %#lx\n", hr); + hr = IMFTransform_ProcessMessage(transform, MFT_MESSAGE_COMMAND_DRAIN, 0); + ok(hr == S_OK, "ProcessMessage returned %#lx\n", hr); + ret = IMFSample_Release(input_sample); + ok(ret <= 1, "Release returned %ld\n", ret);
- ret = check_mf_sample_collection(output_samples, &output_sample_desc, L"rgb32frame-vp.bmp"); - todo_wine - ok(ret == 0 || broken(ret == 25) /* w1064v1507 / w1064v1809 incorrectly rescale */, "got %lu%% diff\n", ret); - IMFCollection_Release(output_samples); + hr = MFCreateCollection(&output_samples); + ok(hr == S_OK, "MFCreateCollection returned %#lx\n", hr);
- output_sample = create_sample(NULL, output_info.cbSize); - hr = check_mft_process_output(transform, output_sample, &output_status); - ok(hr == MF_E_TRANSFORM_NEED_MORE_INPUT, "ProcessOutput returned %#lx\n", hr); - ok(output_status == 0, "got output[0].dwStatus %#lx\n", output_status); - hr = IMFSample_GetTotalLength(output_sample, &length); - ok(hr == S_OK, "GetTotalLength returned %#lx\n", hr); - ok(length == 0, "got length %lu\n", length); + output_sample = create_sample(NULL, output_info.cbSize); + hr = check_mft_process_output(transform, output_sample, &output_status);
-skip_output: - ret = IMFSample_Release(output_sample); - ok(ret == 0, "Release returned %lu\n", ret); + ok(hr == S_OK || broken(hr == MF_E_SHUTDOWN) /* w8 */, "ProcessOutput returned %#lx\n", hr); + if (hr != S_OK) + { + win_skip("ProcessOutput returned MF_E_SHUTDOWN, skipping tests.\n"); + } + else + { + ok(output_status == 0, "got output[0].dwStatus %#lx\n", output_status); + + hr = IMFCollection_AddElement(output_samples, (IUnknown *)output_sample); + ok(hr == S_OK, "AddElement returned %#lx\n", hr); + ref = IMFSample_Release(output_sample); + ok(ref == 1, "Release returned %ld\n", ref); + + ret = check_mf_sample_collection(output_samples, &output_sample_desc, + video_processor_tests[i].result_bitmap); + todo_wine_if(i == 0 || i == 1) + ok(ret <= video_processor_tests[i].delta + /* w1064v1507 / w1064v1809 incorrectly rescale */ + || broken(ret == 25) || broken(ret == 32), + "got %lu%% diff\n", ret); + IMFCollection_Release(output_samples); + + output_sample = create_sample(NULL, output_info.cbSize); + hr = check_mft_process_output(transform, output_sample, &output_status); + ok(hr == MF_E_TRANSFORM_NEED_MORE_INPUT, "ProcessOutput returned %#lx\n", hr); + ok(output_status == 0, "got output[0].dwStatus %#lx\n", output_status); + hr = IMFSample_GetTotalLength(output_sample, &length); + ok(hr == S_OK, "GetTotalLength returned %#lx\n", hr); + ok(length == 0, "got length %lu\n", length); + } + ret = IMFSample_Release(output_sample); + ok(ret == 0, "Release returned %lu\n", ret); + winetest_pop_context(); + }
ret = IMFTransform_Release(transform); ok(ret == 0, "Release returned %ld\n", ret);
From: Eric Pouech eric.pouech@gmail.com
The app I'm considering opens a video_processor on its own, with an NV12 format on input and an ARGB32 format on output.
Tested on Windows: the output samples are flipped vertically, while Wine keeps them untouched.
So this adds a videoflip element in the video processor, to be activated when needed. This patch depends on MR!2159.
Signed-off-by: Eric Pouech epouech@codeweavers.com
---
 dlls/mf/tests/transform.c          |  7 ++-----
 dlls/winegstreamer/color_convert.c | 30 ++++++++++++++++++++++++++++++
 dlls/winegstreamer/video_decoder.c |  2 --
 dlls/winegstreamer/wg_transform.c  | 23 +++++++++++++++++++++++
 4 files changed, 55 insertions(+), 7 deletions(-)
diff --git a/dlls/mf/tests/transform.c b/dlls/mf/tests/transform.c index fb1b0f6c9c6..e46c2d47f1e 100644 --- a/dlls/mf/tests/transform.c +++ b/dlls/mf/tests/transform.c @@ -739,7 +739,6 @@ static void check_mft_get_output_current_type_(IMFTransform *transform, const st ok(hr == S_OK, "Compare returned hr %#lx.\n", hr); todo_wine_if(todo_compare) ok(result, "got result %u.\n", !!result); - IMFMediaType_Release(media_type); IMFMediaType_Release(current_type); } @@ -5978,7 +5977,6 @@ static void test_color_convert(void) ok(ref == 1, "Release returned %ld\n", ref);
ret = check_mf_sample_collection(output_samples, &output_sample_desc, color_conversion_tests[i].result_bitmap); - todo_wine_if(i == 1) ok(ret <= color_conversion_tests[i].delta, "got %lu%% diff\n", ret); IMFCollection_Release(output_samples);
@@ -6223,7 +6221,7 @@ static void test_video_processor(void) .output_type_desc = output_type_desc, .expect_output_type_desc = output_type_desc, .result_bitmap = L"rgb32frame-vp.bmp", - .delta = 0, + .delta = 2, /* Windows returns 0, Wine needs 2 */ },
{ @@ -6231,7 +6229,7 @@ static void test_video_processor(void) .output_type_desc = output_type_desc_negative_stride, .expect_output_type_desc = output_type_desc_negative_stride, .result_bitmap = L"rgb32frame-vp.bmp", - .delta = 0, + .delta = 2, /* Windows returns 0, Wine needs 2 */ },
{ @@ -6623,7 +6621,6 @@ static void test_video_processor(void)
ret = check_mf_sample_collection(output_samples, &output_sample_desc, video_processor_tests[i].result_bitmap); - todo_wine_if(i == 0 || i == 1) ok(ret <= video_processor_tests[i].delta /* w1064v1507 / w1064v1809 incorrectly rescale */ || broken(ret == 25) || broken(ret == 32), diff --git a/dlls/winegstreamer/color_convert.c b/dlls/winegstreamer/color_convert.c index 0eaddc687ee..e55a79fb3bc 100644 --- a/dlls/winegstreamer/color_convert.c +++ b/dlls/winegstreamer/color_convert.c @@ -363,6 +363,7 @@ static HRESULT WINAPI transform_SetInputType(IMFTransform *iface, DWORD id, IMFM struct color_convert *impl = impl_from_IMFTransform(iface); GUID major, subtype; UINT64 frame_size; + UINT32 stride; HRESULT hr; ULONG i;
@@ -392,6 +393,20 @@ static HRESULT WINAPI transform_SetInputType(IMFTransform *iface, DWORD id, IMFM IMFMediaType_Release(impl->input_type); impl->input_type = NULL; } + if (FAILED(IMFMediaType_GetUINT32(impl->input_type, &MF_MT_DEFAULT_STRIDE, &stride))) + { + if (FAILED(hr = MFGetStrideForBitmapInfoHeader(subtype.Data1, frame_size >> 32, (LONG *)&stride))) + { + IMFMediaType_Release(impl->input_type); + impl->input_type = NULL; + } + if ((INT32)stride < 0) stride = -stride; + if (FAILED(hr = IMFMediaType_SetUINT32(impl->input_type, &MF_MT_DEFAULT_STRIDE, stride))) + { + IMFMediaType_Release(impl->input_type); + impl->input_type = NULL; + } + }
if (impl->output_type && FAILED(hr = try_create_wg_transform(impl))) { @@ -411,6 +426,7 @@ static HRESULT WINAPI transform_SetOutputType(IMFTransform *iface, DWORD id, IMF struct color_convert *impl = impl_from_IMFTransform(iface); GUID major, subtype; UINT64 frame_size; + UINT32 stride; HRESULT hr; ULONG i;
@@ -440,6 +456,20 @@ static HRESULT WINAPI transform_SetOutputType(IMFTransform *iface, DWORD id, IMF IMFMediaType_Release(impl->output_type); impl->output_type = NULL; } + if (FAILED(IMFMediaType_GetUINT32(impl->output_type, &MF_MT_DEFAULT_STRIDE, &stride))) + { + if (FAILED(hr = MFGetStrideForBitmapInfoHeader(subtype.Data1, frame_size >> 32, (LONG *)&stride))) + { + IMFMediaType_Release(impl->output_type); + impl->output_type = NULL; + } + if ((INT32)stride < 0) stride = -stride; + if (FAILED(hr = IMFMediaType_SetUINT32(impl->output_type, &MF_MT_DEFAULT_STRIDE, stride))) + { + IMFMediaType_Release(impl->output_type); + impl->output_type = NULL; + } + }
if (impl->input_type && FAILED(hr = try_create_wg_transform(impl))) { diff --git a/dlls/winegstreamer/video_decoder.c b/dlls/winegstreamer/video_decoder.c index 66df8173038..0087e12abe3 100644 --- a/dlls/winegstreamer/video_decoder.c +++ b/dlls/winegstreamer/video_decoder.c @@ -310,8 +310,6 @@ static HRESULT WINAPI transform_SetOutputType(IMFTransform *iface, DWORD id, IMF { mf_media_type_to_wg_format(decoder->output_type, &output_format);
- output_format.u.video.width = frame_size >> 32; - output_format.u.video.height = (UINT32)frame_size; output_format.u.video.fps_d = 0; output_format.u.video.fps_n = 0;
diff --git a/dlls/winegstreamer/wg_transform.c b/dlls/winegstreamer/wg_transform.c index ccdd90361fc..3cd0ae1e728 100644 --- a/dlls/winegstreamer/wg_transform.c +++ b/dlls/winegstreamer/wg_transform.c @@ -56,6 +56,9 @@ struct wg_transform guint input_max_length; GstAtomicQueue *input_queue;
+ bool input_is_flipped; + GstElement *video_flip; + guint output_plane_align; struct wg_sample *output_wg_sample; GstAtomicQueue *output_queue; @@ -347,6 +350,11 @@ static struct wg_sample *transform_request_sample(gsize size, void *context) return InterlockedExchangePointer((void **)&transform->output_wg_sample, NULL); }
+static bool wg_format_video_is_flipped(const struct wg_format *format) +{ + return format->major_type == WG_MAJOR_TYPE_VIDEO && (format->u.video.height < 0); +} + NTSTATUS wg_transform_create(void *args) { struct wg_transform_create_params *params = args; @@ -470,6 +478,12 @@ NTSTATUS wg_transform_create(void *args)
case WG_MAJOR_TYPE_VIDEO: case WG_MAJOR_TYPE_VIDEO_WMV: + if (!(transform->video_flip = create_element("videoflip", "base")) + || !transform_append_element(transform, transform->video_flip, &first, &last)) + goto out; + transform->input_is_flipped = wg_format_video_is_flipped(&input_format); + if (transform->input_is_flipped != wg_format_video_is_flipped(&output_format)) + gst_util_set_object_arg(G_OBJECT(transform->video_flip), "method", "vertical-flip"); if (!(element = create_element("videoconvert", "base")) || !transform_append_element(transform, element, &first, &last)) goto out; @@ -588,6 +602,15 @@ NTSTATUS wg_transform_set_output_format(void *args) gst_caps_unref(transform->output_caps); transform->output_caps = caps;
+ if (transform->video_flip) + { + const char *value; + if (transform->input_is_flipped != wg_format_video_is_flipped(format)) + value = "vertical-flip"; + else + value = "none"; + gst_util_set_object_arg(G_OBJECT(transform->video_flip), "method", value); + } if (!gst_pad_push_event(transform->my_sink, gst_event_new_reconfigure())) { GST_ERROR("Failed to reconfigure transform %p.", transform);
On Fri Apr 14 11:09:14 2023 +0000, eric pouech wrote:
changed this line in [version 7 of the diff](/wine/wine/-/merge_requests/2471/diffs?diff_id=42441&start_sha=a96f782faaf435a798cc4332fba58b6633ce3d65#63c97124ef2b8ff947cc77ab3bcf02cf8a5b6b5f_907_904)
V5 pushed. V4 => V5:
- Test:
  - factorize most of the tests for various stride attributes
  - remove redundant const structures
- Implementation:
  - injecting the MF_MT_DEFAULT_STRIDE attribute in color_conversion; this allows keeping mf_media_type_to_wg_format_video() unchanged
On Fri Apr 14 11:09:13 2023 +0000, eric pouech wrote:
changed this line in [version 7 of the diff](/wine/wine/-/merge_requests/2471/diffs?diff_id=42441&start_sha=a96f782faaf435a798cc4332fba58b6633ce3d65#edc603511ffb77e62710c687a33e3357cffc13b4_5308_5264)
Factorization done in V5 (only factorized within each test_() function, even if the test descriptions used are (mostly) the same...).
On Fri Apr 14 11:09:12 2023 +0000, eric pouech wrote:
changed this line in [version 7 of the diff](/wine/wine/-/merge_requests/2471/diffs?diff_id=42441&start_sha=a96f782faaf435a798cc4332fba58b6633ce3d65#edc603511ffb77e62710c687a33e3357cffc13b4_5938_5789)
done in V5
On Fri Apr 14 11:09:11 2023 +0000, eric pouech wrote:
changed this line in [version 7 of the diff](/wine/wine/-/merge_requests/2471/diffs?diff_id=42441&start_sha=a96f782faaf435a798cc4332fba58b6633ce3d65#edc603511ffb77e62710c687a33e3357cffc13b4_5421_5272)
done in V5
On Fri Apr 14 11:09:10 2023 +0000, eric pouech wrote:
changed this line in [version 7 of the diff](/wine/wine/-/merge_requests/2471/diffs?diff_id=42441&start_sha=a96f782faaf435a798cc4332fba58b6633ce3d65#edc603511ffb77e62710c687a33e3357cffc13b4_5214_5264)
actually, Windows 8 fails if we don't reset the input type as well in video_processor. So I kept it :-(
On Fri Apr 14 06:46:21 2023 +0000, eric pouech wrote:
If I understand the tests correctly, wmvdecode, color convert and video processor don't handle the MF_MT_DEFAULT_STRIDE attribute (or its absence) the same way:

|                 |      |     | MF output format | MF output format w/ | MF output format w/ |
|                 | From | To  | w/o stride       | positive stride     | negative stride     |
|-----------------+------+-----+------------------+---------------------+---------------------|
| decode          | WMV  | YUV | no flip          | no flip             | no flip             |
| decode          | WMV  | RGB | flip             | flip                | flip                |
| color conv      | YUV  | RGB | no flip          | no flip             | flip                |
| video processor | YUV  | RGB | flip             | no flip             | flip                |
So I tried to keep what looks generic in mf_media_type_to_wg_format_video(), and moved the format-specific handling to each wg_transform() caller.
Rewrote it differently in V5 (following Rémi's advice); there is no longer any need to change mf_media_type_to_wg_format_video().
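Concretely, the injection amounts to defaulting MF_MT_DEFAULT_STRIDE on the media type when the caller didn't provide it, roughly as in this condensed sketch of the color_convert.c SetInputType/SetOutputType hunks (slightly restructured, error paths simplified; media_type stands for impl->input_type or impl->output_type):

```c
/* If the caller did not set a stride, derive a default from the subtype and
 * frame width and force it positive (top-down, i.e. no flip); an explicitly
 * negative stride set by the caller is kept as-is. This lets
 * mf_media_type_to_wg_format_video() stay unchanged. */
UINT32 stride;
LONG default_stride;

if (FAILED(IMFMediaType_GetUINT32(media_type, &MF_MT_DEFAULT_STRIDE, &stride))
        && SUCCEEDED(MFGetStrideForBitmapInfoHeader(subtype.Data1, frame_size >> 32, &default_stride)))
{
    if (default_stride < 0) default_stride = -default_stride;
    IMFMediaType_SetUINT32(media_type, &MF_MT_DEFAULT_STRIDE, default_stride);
}
```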
Rémi Bernon (@rbernon) commented about dlls/winegstreamer/video_decoder.c:
{ mf_media_type_to_wg_format(decoder->output_type, &output_format);
output_format.u.video.width = frame_size >> 32;
output_format.u.video.height = (UINT32)frame_size;
I don't think you need to change this: it's a generic video decoder that's only used for the Indeo codec for now (but could ideally be used later to factor more decoders).
One last comment and it looks good to me otherwise.
Note that this will also conflict with https://gitlab.winehq.org/wine/wine/-/merge_requests/2640, as I renamed transform_append_element to append_element, so you'll have to rebase on top of it; sorry about that.
On Fri Apr 14 12:37:05 2023 +0000, Rémi Bernon wrote:
I don't think you need to change this, this is a generic video decoder that's only used for Indeo codec for now (but ideally could be later used to factor more decoders).
Right, I needed it in a previous version of the patch, but this is no longer the case. I'll remove it (and I'll rebase, since !2640 gets in the way).