This MR adds a test that fails on Wine. It's not an easy fix, because the Windows 16-bit OpenGL software renderer is pretty quirky and works differently from Linux OpenGL drivers; I'll file an issue in Bugzilla.
Wine output:
```
opengl.c:1863: Tests skipped: Skipping 16-bit rendering test (no 16 bit pixel format with the DRAW_TO_BITMAP flag was available)
```
Wine output without `todo_wine` and the skip:
```
opengl.c:1860: Test failed: Failed to get a 16-bit DRAW_TO_BITMAP pixel format
opengl.c:1869: Test failed: Failed to DescribePixelFormat (error: 2000)
opengl.c:1872: Test failed: Wrong amount of color bits (got 0, expected 16)
opengl.c:1888: Test failed: Failed to SetPixelFormat (error: 2000)
opengl.c:1892: Test failed: Failed to wglCreateContext (error: 2000)
0024:err:opengl:null_glClearColor unsupported
0024:err:opengl:null_glClear unsupported
0024:err:opengl:null_glFinish unsupported
0024:err:opengl:null_glMatrixMode unsupported
0024:err:opengl:null_glLoadIdentity unsupported
0024:err:opengl:null_glOrtho unsupported
0024:err:opengl:null_glMatrixMode unsupported
0024:err:opengl:null_glLoadIdentity unsupported
0024:err:opengl:null_glClearColor unsupported
0024:err:opengl:null_glClear unsupported
0024:err:opengl:null_glColor3f unsupported
0024:err:opengl:null_glLineWidth unsupported
0024:err:opengl:null_glBegin unsupported
0024:err:opengl:null_glVertex2i unsupported
0024:err:opengl:null_glVertex2i unsupported
0024:err:opengl:null_glEnd unsupported
0024:err:opengl:null_glFinish unsupported
```
--
v3: opengl32/tests: Add 16-bit bitmap rendering tests
From: Zowie van Dillen <zowie+wine@vandillen.io>
---
 dlls/opengl32/tests/opengl.c | 143 +++++++++++++++++++++++++++++++++++
 1 file changed, 143 insertions(+)
diff --git a/dlls/opengl32/tests/opengl.c b/dlls/opengl32/tests/opengl.c
index 1471a8e7084..a2055e77c4f 100644
--- a/dlls/opengl32/tests/opengl.c
+++ b/dlls/opengl32/tests/opengl.c
@@ -1816,6 +1816,148 @@ static void test_bitmap_rendering( BOOL use_dib )
     winetest_pop_context();
 }
 
+static void test_16bit_bitmap_rendering(void)
+{
+    PIXELFORMATDESCRIPTOR pfd;
+    INT pixel_format, success;
+    HGDIOBJ old_gdi_obj;
+    USHORT *pixels;
+    HBITMAP bitmap;
+    HGLRC gl;
+    HDC hdc;
+
+    PIXELFORMATDESCRIPTOR pixel_format_args = {
+        .nSize = sizeof(PIXELFORMATDESCRIPTOR),
+        .nVersion = 1,
+        .dwFlags = PFD_DRAW_TO_BITMAP | PFD_SUPPORT_OPENGL | PFD_DEPTH_DONTCARE,
+        .iPixelType = PFD_TYPE_RGBA,
+        .iLayerType = PFD_MAIN_PLANE,
+        .cColorBits = 16,
+        .cAlphaBits = 0
+    };
+    BITMAPINFO bitmap_args = {
+        .bmiHeader = {
+            .biSize = sizeof(BITMAPINFOHEADER),
+            .biPlanes = 1,
+            .biCompression = BI_RGB,
+            .biWidth = 4,
+            .biHeight = -4, /* Four pixels tall with the origin in the top-left corner. */
+            .biBitCount = 16
+        }
+    };
+
+    hdc = CreateCompatibleDC(NULL);
+    ok(hdc != NULL, "Failed to get a device context\n");
+
+    /* Create a bitmap. */
+    bitmap = CreateDIBSection(NULL, &bitmap_args, DIB_RGB_COLORS, (void**)&pixels, NULL, 0);
+    old_gdi_obj = SelectObject(hdc, bitmap);
+    ok(old_gdi_obj != NULL, "Failed to SelectObject\n");
+
+    /* Choose a pixel format. */
+    pixel_format = ChoosePixelFormat(hdc, &pixel_format_args);
+    todo_wine ok(pixel_format != 0, "Failed to get a 16 bit pixel format with the DRAW_TO_BITMAP flag.\n");
+
+    if (pixel_format == 0)
+    {
+        skip("Skipping 16-bit rendering test"
+             " (no 16 bit pixel format with the DRAW_TO_BITMAP flag was available)\n");
+        SelectObject(hdc, old_gdi_obj);
+        DeleteObject(bitmap);
+        DeleteDC(hdc);
+        return;
+    }
+
+    /* When asking for a 16-bit DRAW_TO_BITMAP pixel format, Windows will give you r5g5b5a1 by
+     * default, even if you didn't ask for an alpha bit.
+     *
+     * It's important to note that all of the color bits have to match exactly, because the renders
+     * are sent back to the CPU and will have to match any other software rendering operations that
+     * the program does (DRAW_TO_BITMAP is normally used in combination with blitting). */
+    success = DescribePixelFormat(hdc, pixel_format, sizeof(pfd), &pfd);
+    ok(success != 0, "Failed to DescribePixelFormat (error: %lu)\n", GetLastError());
+    /* Likely MSDN inaccuracy: According to the PIXELFORMATDESCRIPTOR docs, alpha bits are excluded
+     * from cColorBits. It doesn't seem like that's true. */
+    ok(pfd.cColorBits == 16, "Wrong amount of color bits (got %d, expected 16)\n", pfd.cColorBits);
+    todo_wine ok(pfd.cRedBits == 5, "Wrong amount of red bits (got %d, expected 5)\n", pfd.cRedBits);
+    todo_wine ok(pfd.cGreenBits == 5, "Wrong amount of green bits (got %d, expected 5)\n", pfd.cGreenBits);
+    todo_wine ok(pfd.cBlueBits == 5, "Wrong amount of blue bits (got %d, expected 5)\n", pfd.cBlueBits);
+    /* Quirky: It seems that there's an alpha bit, but it somehow doesn't count as one for
+     * DescribePixelFormat. On Windows cAlphaBits is zero.
+     * ok(pfd.cAlphaBits == 1, "Wrong amount of alpha bits (got %d, expected 1)\n", pfd.cAlphaBits); */
+    todo_wine ok(pfd.cRedShift == 10, "Wrong red shift (got %d, expected 10)\n", pfd.cRedShift);
+    todo_wine ok(pfd.cGreenShift == 5, "Wrong green shift (got %d, expected 5)\n", pfd.cGreenShift);
+    /* The blue shift is driver-dependent, so this test is likely to pass.
+     * I've used `todo_wine_if` to disable the warning. */
+    todo_wine_if(pfd.cBlueShift != 0)
+    ok(pfd.cBlueShift == 0, "Wrong blue shift (got %d, expected 0)\n", pfd.cBlueShift);
+
+    success = SetPixelFormat(hdc, pixel_format, &pixel_format_args);
+    ok(success, "Failed to SetPixelFormat (error: %lu)\n", GetLastError());
+
+    /* Create an OpenGL context. */
+    gl = wglCreateContext(hdc);
+    ok(gl != NULL, "Failed to wglCreateContext (error: %lu)\n", GetLastError());
+    success = wglMakeCurrent(hdc, gl);
+    ok(success, "Failed to wglMakeCurrent (error: %lu)\n", GetLastError());
+
+    /* Try setting the bitmap to white. */
+    glClearColor(1.0f, 1.0f, 1.0f, 1.0f);
+    glClear(GL_COLOR_BUFFER_BIT);
+    glFinish();
+    todo_wine ok(pixels[0] == 0x7fff, "Wrong color after glClear at (0, 0): %#x\n", pixels[0]);
+    todo_wine ok(pixels[1] == 0x7fff, "Wrong color after glClear at (1, 0): %#x\n", pixels[1]);
+
+    /* Try setting the bitmap to black with a white line. */
+    glMatrixMode(GL_PROJECTION);
+    glLoadIdentity();
+    glOrtho(0.0f, 4.0f, 4.0f, 0.0f, -1.0f, 1.0f);
+    glMatrixMode(GL_MODELVIEW);
+    glLoadIdentity();
+
+    glClearColor(0.0f, 0.0f, 0.0f, 1.0f);
+    glClear(GL_COLOR_BUFFER_BIT);
+
+    glColor3f(1.0f, 1.0f, 1.0f);
+    glLineWidth(1.0f);
+    glBegin(GL_LINES);
+    glVertex2i(1, 1);
+    glVertex2i(1, 3);
+    glEnd();
+
+    glFinish();
+
+    {
+        /* Note that the line stops at (1,2) on Windows despite the second vertex being (1,3).
+         * I'm not sure if that's an implementation quirk or expected OpenGL behaviour. */
+        USHORT X = 0x7fff, _ = 0x0;
+        USHORT expected[16] = {
+            _,_,_,_,
+            _,X,_,_,
+            _,X,_,_,
+            _,_,_,_
+        };
+
+        for (int i = 0; i < 16; i++)
+        {
+            BOOL matches = (pixels[i] == expected[i]);
+            int x = i % 4;
+            int y = i / 4;
+            /* I'm using a loop so that I can put the expected image in an easy-to-understand array.
+             * Unfortunately this way of working doesn't work great with `todo_wine` since only half
+             * of the elements are a mismatch. I'm using `todo_wine_if` as a workaround. */
+            todo_wine_if(!matches) ok(matches, "Wrong color at (%d,%d). Got %#x, expected %#x\n",
+                                      x, y, pixels[i], expected[i]);
+        }
+    }
+
+    /* Clean up. */
+    wglDeleteContext(gl);
+    SelectObject(hdc, old_gdi_obj);
+    DeleteObject(bitmap);
+    DeleteDC(hdc);
+}
+
 static void test_d3dkmt_rendering(void)
 {
     static const RECT expect_rect = {0, 0, 4, 4};
@@ -3443,6 +3585,7 @@ START_TEST(opengl)
     test_bitmap_rendering( TRUE );
     test_bitmap_rendering( FALSE );
+    test_16bit_bitmap_rendering();
     test_d3dkmt_rendering();
     test_minimized();
     test_window_dc();
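Two asides on the behaviour the test encodes. First, the 0x7fff expectation follows directly from the r5g5b5 layout the test checks for (red shift 10, green shift 5, blue shift 0, top bit unused). A minimal sketch of the packing, with a made-up helper name (`pack_r5g5b5` is illustration only, not part of the patch):

```
#include <stdio.h>

/* Hypothetical helper, for illustration only: pack three 5-bit channels
 * using the shifts the test expects (red 10, green 5, blue 0). */
static unsigned short pack_r5g5b5(unsigned r, unsigned g, unsigned b)
{
    return (unsigned short)(((r & 0x1f) << 10) | ((g & 0x1f) << 5) | (b & 0x1f));
}

int main(void)
{
    /* Full-intensity white (all three channels at 31) packs to 0x7fff,
     * which is the value the test compares pixels[] against. */
    printf("white = %#x\n", pack_r5g5b5(31, 31, 31));
    return 0;
}
```

Second, the line stopping at (1,2) looks like expected OpenGL behaviour rather than a quirk: the specification's diamond-exit rule excludes the final fragment of a line segment, so a GL_LINES segment from (1,1) to (1,3) produces fragments at (1,1) and (1,2) only.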
Rémi Bernon (@rbernon) commented about dlls/opengl32/tests/opengl.c:
> +    ok(success != 0, "Failed to DescribePixelFormat (error: %lu)\n", GetLastError());
> +    /* Likely MSDN inaccuracy: According to the PIXELFORMATDESCRIPTOR docs, alpha bits are excluded
> +     * from cColorBits. It doesn't seem like that's true. */
> +    ok(pfd.cColorBits == 16, "Wrong amount of color bits (got %d, expected 16)\n", pfd.cColorBits);
> +    todo_wine ok(pfd.cRedBits == 5, "Wrong amount of red bits (got %d, expected 5)\n", pfd.cRedBits);
> +    todo_wine ok(pfd.cGreenBits == 5, "Wrong amount of green bits (got %d, expected 5)\n", pfd.cGreenBits);
> +    todo_wine ok(pfd.cBlueBits == 5, "Wrong amount of blue bits (got %d, expected 5)\n", pfd.cBlueBits);
> +    /* Quirky: It seems that there's an alpha bit, but it somehow doesn't count as one for
> +     * DescribePixelFormat. On Windows cAlphaBits is zero.
> +     * ok(pfd.cAlphaBits == 1, "Wrong amount of alpha bits (got %d, expected 1)\n", pfd.cAlphaBits); */
> +    todo_wine ok(pfd.cRedShift == 10, "Wrong red shift (got %d, expected 10)\n", pfd.cRedShift);
> +    todo_wine ok(pfd.cGreenShift == 5, "Wrong green shift (got %d, expected 5)\n", pfd.cGreenShift);
> +    /* The blue shift is driver-dependent, so this test is likely to pass.
> +     * I've used `todo_wine_if` to disable the warning. */
> +    todo_wine_if(pfd.cBlueShift != 0)
> +    ok(pfd.cBlueShift == 0, "Wrong blue shift (got %d, expected 0)\n", pfd.cBlueShift);
This will make the todo_wine stay unnoticed once the implementation is fixed. Do we actually need it? Could it be either removed or made unconditional?

Looks mostly good otherwise, thank you for looking into that! Would be really nice to fix that long-standing issue :)
On Wed Sep 3 07:47:00 2025 +0000, Rémi Bernon wrote:
> This will make the todo_wine stay unnoticed once the implementation is fixed. Do we actually need it? Could it be either removed or made unconditional?
I could remove it; checking the red shift and green shift is probably sufficient. I feel like it's a little bit more robust to also make sure the blue shift is zero, but it's not strictly necessary.
On Wed Sep 3 07:49:50 2025 +0000, Zowie van Dillen wrote:
> I could remove it; checking the red shift and green shift is probably sufficient. I feel like it's a little bit more robust to also make sure the blue shift is zero, but it's not strictly necessary.
Sure, then I think the todo_wine could probably be removed for now. This code isn't executed yet anyway, and it should pass once we have the right pixel format exposed?
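For illustration, this is roughly what that change would look like (a sketch of a possible v4, not the final patch): the `todo_wine_if` wrapper dropped, so the check runs unconditionally and should simply pass once the right pixel format is exposed.

```
    /* Sketch: blue shift asserted directly, without todo_wine_if. */
    ok(pfd.cBlueShift == 0, "Wrong blue shift (got %d, expected 0)\n", pfd.cBlueShift);
```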