From d5b2a0159ea51535a48ffc798f08ee3149b1f932 Mon Sep 17 00:00:00 2001
From: Evan Tang
Date: Fri, 1 Sep 2023 19:30:30 -0500
Subject: [PATCH 1/7] Align MVKPixelFormat definitions

---
 .../MoltenVK/GPUObjects/MVKPixelFormats.mm | 740 +++++++++---------
 1 file changed, 370 insertions(+), 370 deletions(-)

diff --git a/MoltenVK/MoltenVK/GPUObjects/MVKPixelFormats.mm b/MoltenVK/MoltenVK/GPUObjects/MVKPixelFormats.mm
index 7e1c1a193..9658d20f8 100644
--- a/MoltenVK/MoltenVK/GPUObjects/MVKPixelFormats.mm
+++ b/MoltenVK/MoltenVK/GPUObjects/MVKPixelFormats.mm
@@ -829,176 +829,176 @@
 	// UNDEFINED must come first.
 	addVkFormatDesc( UNDEFINED, Invalid, Invalid, Invalid, Invalid, 1, 1, 0, None );
-	addVkFormatDesc( R4G4_UNORM_PACK8, Invalid, Invalid, Invalid, Invalid, 1, 1, 1, ColorFloat );
 	addVkFormatDesc( R4G4B4A4_UNORM_PACK16, ABGR4Unorm, Invalid, Invalid, Invalid, 1, 1, 2, ColorFloat );
-	addVkFormatDesc( B4G4R4A4_UNORM_PACK16, Invalid, Invalid, Invalid, Invalid, 1, 1, 2, ColorFloat );
 	addVkFormatDescSwizzled( A4R4G4B4_UNORM_PACK16, ABGR4Unorm, Invalid, Invalid, Invalid, 1, 1, 2, ColorFloat, G, B, A, R );
 	addVkFormatDescSwizzled( A4B4G4R4_UNORM_PACK16, ABGR4Unorm, Invalid, Invalid, Invalid, 1, 1, 2, ColorFloat, A, B, G, R );
-	addVkFormatDesc( R5G6B5_UNORM_PACK16, B5G6R5Unorm, Invalid, Invalid, Invalid, 1, 1, 2, ColorFloat );
-	addVkFormatDesc( B5G6R5_UNORM_PACK16, Invalid, Invalid, Invalid, Invalid, 1, 1, 2, ColorFloat );
 	addVkFormatDesc( R5G5B5A1_UNORM_PACK16, A1BGR5Unorm, Invalid, Invalid, Invalid, 1, 1, 2, ColorFloat );
-	addVkFormatDesc( B5G5R5A1_UNORM_PACK16, Invalid, Invalid, Invalid, Invalid, 1, 1, 2, ColorFloat );
 	addVkFormatDesc( A1R5G5B5_UNORM_PACK16, BGR5A1Unorm, Invalid, Invalid, Invalid, 1, 1, 2, ColorFloat );
-	addVkFormatDesc( R8_UNORM, R8Unorm, Invalid, UCharNormalized, UChar2Normalized, 1, 1, 1, ColorFloat );
-	addVkFormatDesc( R8_SNORM, R8Snorm, Invalid, CharNormalized, Char2Normalized, 1, 1, 1, ColorFloat );
-	addVkFormatDesc( R8_USCALED, Invalid, Invalid, UChar, UChar2, 1, 1, 1, ColorFloat );
-	addVkFormatDesc( R8_SSCALED, Invalid, Invalid, Char, Char2, 1, 1, 1, ColorFloat );
-	addVkFormatDesc( R8_UINT, R8Uint, Invalid, UChar, UChar2, 1, 1, 1, ColorUInt8 );
-	addVkFormatDesc( R8_SINT, R8Sint, Invalid, Char, Char2, 1, 1, 1, ColorInt8 );
-	addVkFormatDesc( R8_SRGB, R8Unorm_sRGB, Invalid, UCharNormalized, UChar2Normalized, 1, 1, 1, ColorFloat );
-
-	addVkFormatDesc( R8G8_UNORM, RG8Unorm, Invalid, UChar2Normalized, Invalid, 1, 1, 2, ColorFloat );
-	addVkFormatDesc( R8G8_SNORM, RG8Snorm, Invalid, Char2Normalized, Invalid, 1, 1, 2, ColorFloat );
-	addVkFormatDesc( R8G8_USCALED, Invalid, Invalid, UChar2, Invalid, 1, 1, 2, ColorFloat );
-	addVkFormatDesc( R8G8_SSCALED, Invalid, Invalid, Char2, Invalid, 1, 1, 2, ColorFloat );
-	addVkFormatDesc( R8G8_UINT, RG8Uint, Invalid, UChar2, Invalid, 1, 1, 2, ColorUInt8 );
-	addVkFormatDesc( R8G8_SINT, RG8Sint, Invalid, Char2, Invalid, 1, 1, 2, ColorInt8 );
-	addVkFormatDesc( R8G8_SRGB, RG8Unorm_sRGB, Invalid, UChar2Normalized, Invalid, 1, 1, 2, ColorFloat );
- - addVkFormatDesc( R8G8B8_UNORM, Invalid, Invalid, UChar3Normalized, Invalid, 1, 1, 3, ColorFloat ); - addVkFormatDesc( R8G8B8_SNORM, Invalid, Invalid, Char3Normalized, Invalid, 1, 1, 3, ColorFloat ); - addVkFormatDesc( R8G8B8_USCALED, Invalid, Invalid, UChar3, Invalid, 1, 1, 3, ColorFloat ); - addVkFormatDesc( R8G8B8_SSCALED, Invalid, Invalid, Char3, Invalid, 1, 1, 3, ColorFloat ); - addVkFormatDesc( R8G8B8_UINT, Invalid, Invalid, UChar3, Invalid, 1, 1, 3, ColorUInt8 ); - addVkFormatDesc( R8G8B8_SINT, Invalid, Invalid, Char3, Invalid, 1, 1, 3, ColorInt8 ); - addVkFormatDesc( R8G8B8_SRGB, Invalid, Invalid, UChar3Normalized, Invalid, 1, 1, 3, ColorFloat ); - - addVkFormatDesc( B8G8R8_UNORM, Invalid, Invalid, Invalid, Invalid, 1, 1, 3, ColorFloat ); - addVkFormatDesc( B8G8R8_SNORM, Invalid, Invalid, Invalid, Invalid, 1, 1, 3, ColorFloat ); + addVkFormatDesc( R8_UNORM, R8Unorm, Invalid, UCharNormalized, UChar2Normalized, 1, 1, 1, ColorFloat ); + addVkFormatDesc( R8_SNORM, R8Snorm, Invalid, CharNormalized, Char2Normalized, 1, 1, 1, ColorFloat ); + addVkFormatDesc( R8_USCALED, Invalid, Invalid, UChar, UChar2, 1, 1, 1, ColorFloat ); + addVkFormatDesc( R8_SSCALED, Invalid, Invalid, Char, Char2, 1, 1, 1, ColorFloat ); + addVkFormatDesc( R8_UINT, R8Uint, Invalid, UChar, UChar2, 1, 1, 1, ColorUInt8 ); + addVkFormatDesc( R8_SINT, R8Sint, Invalid, Char, Char2, 1, 1, 1, ColorInt8 ); + addVkFormatDesc( R8_SRGB, R8Unorm_sRGB, Invalid, UCharNormalized, UChar2Normalized, 1, 1, 1, ColorFloat ); + + addVkFormatDesc( R8G8_UNORM, RG8Unorm, Invalid, UChar2Normalized, Invalid, 1, 1, 2, ColorFloat ); + addVkFormatDesc( R8G8_SNORM, RG8Snorm, Invalid, Char2Normalized, Invalid, 1, 1, 2, ColorFloat ); + addVkFormatDesc( R8G8_USCALED, Invalid, Invalid, UChar2, Invalid, 1, 1, 2, ColorFloat ); + addVkFormatDesc( R8G8_SSCALED, Invalid, Invalid, Char2, Invalid, 1, 1, 2, ColorFloat ); + addVkFormatDesc( R8G8_UINT, RG8Uint, Invalid, UChar2, Invalid, 1, 1, 2, ColorUInt8 ); + addVkFormatDesc( R8G8_SINT, RG8Sint, Invalid, Char2, Invalid, 1, 1, 2, ColorInt8 ); + addVkFormatDesc( R8G8_SRGB, RG8Unorm_sRGB, Invalid, UChar2Normalized, Invalid, 1, 1, 2, ColorFloat ); + + addVkFormatDesc( R8G8B8_UNORM, Invalid, Invalid, UChar3Normalized, Invalid, 1, 1, 3, ColorFloat ); + addVkFormatDesc( R8G8B8_SNORM, Invalid, Invalid, Char3Normalized, Invalid, 1, 1, 3, ColorFloat ); + addVkFormatDesc( R8G8B8_USCALED, Invalid, Invalid, UChar3, Invalid, 1, 1, 3, ColorFloat ); + addVkFormatDesc( R8G8B8_SSCALED, Invalid, Invalid, Char3, Invalid, 1, 1, 3, ColorFloat ); + addVkFormatDesc( R8G8B8_UINT, Invalid, Invalid, UChar3, Invalid, 1, 1, 3, ColorUInt8 ); + addVkFormatDesc( R8G8B8_SINT, Invalid, Invalid, Char3, Invalid, 1, 1, 3, ColorInt8 ); + addVkFormatDesc( R8G8B8_SRGB, Invalid, Invalid, UChar3Normalized, Invalid, 1, 1, 3, ColorFloat ); + + addVkFormatDesc( B8G8R8_UNORM, Invalid, Invalid, Invalid, Invalid, 1, 1, 3, ColorFloat ); + addVkFormatDesc( B8G8R8_SNORM, Invalid, Invalid, Invalid, Invalid, 1, 1, 3, ColorFloat ); addVkFormatDesc( B8G8R8_USCALED, Invalid, Invalid, Invalid, Invalid, 1, 1, 3, ColorFloat ); addVkFormatDesc( B8G8R8_SSCALED, Invalid, Invalid, Invalid, Invalid, 1, 1, 3, ColorFloat ); - addVkFormatDesc( B8G8R8_UINT, Invalid, Invalid, Invalid, Invalid, 1, 1, 3, ColorUInt8 ); - addVkFormatDesc( B8G8R8_SINT, Invalid, Invalid, Invalid, Invalid, 1, 1, 3, ColorInt8 ); - addVkFormatDesc( B8G8R8_SRGB, Invalid, Invalid, Invalid, Invalid, 1, 1, 3, ColorFloat ); - - addVkFormatDesc( R8G8B8A8_UNORM, RGBA8Unorm, Invalid, UChar4Normalized, 
Invalid, 1, 1, 4, ColorFloat ); - addVkFormatDesc( R8G8B8A8_SNORM, RGBA8Snorm, Invalid, Char4Normalized, Invalid, 1, 1, 4, ColorFloat ); - addVkFormatDesc( R8G8B8A8_USCALED, Invalid, Invalid, UChar4, Invalid, 1, 1, 4, ColorFloat ); - addVkFormatDesc( R8G8B8A8_SSCALED, Invalid, Invalid, Char4, Invalid, 1, 1, 4, ColorFloat ); - addVkFormatDesc( R8G8B8A8_UINT, RGBA8Uint, Invalid, UChar4, Invalid, 1, 1, 4, ColorUInt8 ); - addVkFormatDesc( R8G8B8A8_SINT, RGBA8Sint, Invalid, Char4, Invalid, 1, 1, 4, ColorInt8 ); - addVkFormatDesc( R8G8B8A8_SRGB, RGBA8Unorm_sRGB, Invalid, UChar4Normalized, Invalid, 1, 1, 4, ColorFloat ); - - addVkFormatDesc( B8G8R8A8_UNORM, BGRA8Unorm, Invalid, UChar4Normalized_BGRA, Invalid, 1, 1, 4, ColorFloat ); - addVkFormatDesc( B8G8R8A8_SNORM, Invalid, Invalid, Invalid, Invalid, 1, 1, 4, ColorFloat ); - addVkFormatDesc( B8G8R8A8_USCALED, Invalid, Invalid, Invalid, Invalid, 1, 1, 4, ColorFloat ); - addVkFormatDesc( B8G8R8A8_SSCALED, Invalid, Invalid, Invalid, Invalid, 1, 1, 4, ColorFloat ); - addVkFormatDesc( B8G8R8A8_UINT, Invalid, Invalid, Invalid, Invalid, 1, 1, 4, ColorUInt8 ); - addVkFormatDesc( B8G8R8A8_SINT, Invalid, Invalid, Invalid, Invalid, 1, 1, 4, ColorInt8 ); - addVkFormatDesc( B8G8R8A8_SRGB, BGRA8Unorm_sRGB, Invalid, Invalid, Invalid, 1, 1, 4, ColorFloat ); - - addVkFormatDesc( A8B8G8R8_UNORM_PACK32, RGBA8Unorm, Invalid, UChar4Normalized, Invalid, 1, 1, 4, ColorFloat ); - addVkFormatDesc( A8B8G8R8_SNORM_PACK32, RGBA8Snorm, Invalid, Char4Normalized, Invalid, 1, 1, 4, ColorFloat ); - addVkFormatDesc( A8B8G8R8_USCALED_PACK32, Invalid, Invalid, UChar4, Invalid, 1, 1, 4, ColorFloat ); - addVkFormatDesc( A8B8G8R8_SSCALED_PACK32, Invalid, Invalid, Char4, Invalid, 1, 1, 4, ColorFloat ); - addVkFormatDesc( A8B8G8R8_UINT_PACK32, RGBA8Uint, Invalid, UChar4, Invalid, 1, 1, 4, ColorUInt8 ); - addVkFormatDesc( A8B8G8R8_SINT_PACK32, RGBA8Sint, Invalid, Char4, Invalid, 1, 1, 4, ColorInt8 ); - addVkFormatDesc( A8B8G8R8_SRGB_PACK32, RGBA8Unorm_sRGB, Invalid, UChar4Normalized, Invalid, 1, 1, 4, ColorFloat ); - - addVkFormatDesc( A2R10G10B10_UNORM_PACK32, BGR10A2Unorm, Invalid, Invalid, Invalid, 1, 1, 4, ColorFloat ); - addVkFormatDesc( A2R10G10B10_SNORM_PACK32, Invalid, Invalid, Invalid, Invalid, 1, 1, 4, ColorFloat ); - addVkFormatDesc( A2R10G10B10_USCALED_PACK32, Invalid, Invalid, Invalid, Invalid, 1, 1, 4, ColorFloat ); - addVkFormatDesc( A2R10G10B10_SSCALED_PACK32, Invalid, Invalid, Invalid, Invalid, 1, 1, 4, ColorFloat ); - addVkFormatDesc( A2R10G10B10_UINT_PACK32, Invalid, Invalid, Invalid, Invalid, 1, 1, 4, ColorUInt16 ); - addVkFormatDesc( A2R10G10B10_SINT_PACK32, Invalid, Invalid, Invalid, Invalid, 1, 1, 4, ColorInt16 ); - - addVkFormatDesc( A2B10G10R10_UNORM_PACK32, RGB10A2Unorm, Invalid, UInt1010102Normalized, Invalid, 1, 1, 4, ColorFloat ); - addVkFormatDesc( A2B10G10R10_SNORM_PACK32, Invalid, Invalid, Int1010102Normalized, Invalid, 1, 1, 4, ColorFloat ); - addVkFormatDesc( A2B10G10R10_USCALED_PACK32, Invalid, Invalid, Invalid, Invalid, 1, 1, 4, ColorFloat ); - addVkFormatDesc( A2B10G10R10_SSCALED_PACK32, Invalid, Invalid, Invalid, Invalid, 1, 1, 4, ColorFloat ); - addVkFormatDesc( A2B10G10R10_UINT_PACK32, RGB10A2Uint, Invalid, Invalid, Invalid, 1, 1, 4, ColorUInt16 ); - addVkFormatDesc( A2B10G10R10_SINT_PACK32, Invalid, Invalid, Invalid, Invalid, 1, 1, 4, ColorInt16 ); - - addVkFormatDesc( R16_UNORM, R16Unorm, Invalid, UShortNormalized, UShort2Normalized, 1, 1, 2, ColorFloat ); - addVkFormatDesc( R16_SNORM, R16Snorm, Invalid, ShortNormalized, Short2Normalized, 1, 1, 
2, ColorFloat ); - addVkFormatDesc( R16_USCALED, Invalid, Invalid, UShort, UShort2, 1, 1, 2, ColorFloat ); - addVkFormatDesc( R16_SSCALED, Invalid, Invalid, Short, Short2, 1, 1, 2, ColorFloat ); - addVkFormatDesc( R16_UINT, R16Uint, Invalid, UShort, UShort2, 1, 1, 2, ColorUInt16 ); - addVkFormatDesc( R16_SINT, R16Sint, Invalid, Short, Short2, 1, 1, 2, ColorInt16 ); - addVkFormatDesc( R16_SFLOAT, R16Float, Invalid, Half, Half2, 1, 1, 2, ColorFloat ); - - addVkFormatDesc( R16G16_UNORM, RG16Unorm, Invalid, UShort2Normalized, Invalid, 1, 1, 4, ColorFloat ); - addVkFormatDesc( R16G16_SNORM, RG16Snorm, Invalid, Short2Normalized, Invalid, 1, 1, 4, ColorFloat ); - addVkFormatDesc( R16G16_USCALED, Invalid, Invalid, UShort2, Invalid, 1, 1, 4, ColorFloat ); - addVkFormatDesc( R16G16_SSCALED, Invalid, Invalid, Short2, Invalid, 1, 1, 4, ColorFloat ); - addVkFormatDesc( R16G16_UINT, RG16Uint, Invalid, UShort2, Invalid, 1, 1, 4, ColorUInt16 ); - addVkFormatDesc( R16G16_SINT, RG16Sint, Invalid, Short2, Invalid, 1, 1, 4, ColorInt16 ); - addVkFormatDesc( R16G16_SFLOAT, RG16Float, Invalid, Half2, Invalid, 1, 1, 4, ColorFloat ); - - addVkFormatDesc( R16G16B16_UNORM, Invalid, Invalid, UShort3Normalized, Invalid, 1, 1, 6, ColorFloat ); - addVkFormatDesc( R16G16B16_SNORM, Invalid, Invalid, Short3Normalized, Invalid, 1, 1, 6, ColorFloat ); - addVkFormatDesc( R16G16B16_USCALED, Invalid, Invalid, UShort3, Invalid, 1, 1, 6, ColorFloat ); - addVkFormatDesc( R16G16B16_SSCALED, Invalid, Invalid, Short3, Invalid, 1, 1, 6, ColorFloat ); - addVkFormatDesc( R16G16B16_UINT, Invalid, Invalid, UShort3, Invalid, 1, 1, 6, ColorUInt16 ); - addVkFormatDesc( R16G16B16_SINT, Invalid, Invalid, Short3, Invalid, 1, 1, 6, ColorInt16 ); - addVkFormatDesc( R16G16B16_SFLOAT, Invalid, Invalid, Half3, Invalid, 1, 1, 6, ColorFloat ); - - addVkFormatDesc( R16G16B16A16_UNORM, RGBA16Unorm, Invalid, UShort4Normalized, Invalid, 1, 1, 8, ColorFloat ); - addVkFormatDesc( R16G16B16A16_SNORM, RGBA16Snorm, Invalid, Short4Normalized, Invalid, 1, 1, 8, ColorFloat ); - addVkFormatDesc( R16G16B16A16_USCALED, Invalid, Invalid, UShort4, Invalid, 1, 1, 8, ColorFloat ); - addVkFormatDesc( R16G16B16A16_SSCALED, Invalid, Invalid, Short4, Invalid, 1, 1, 8, ColorFloat ); - addVkFormatDesc( R16G16B16A16_UINT, RGBA16Uint, Invalid, UShort4, Invalid, 1, 1, 8, ColorUInt16 ); - addVkFormatDesc( R16G16B16A16_SINT, RGBA16Sint, Invalid, Short4, Invalid, 1, 1, 8, ColorInt16 ); - addVkFormatDesc( R16G16B16A16_SFLOAT, RGBA16Float, Invalid, Half4, Invalid, 1, 1, 8, ColorFloat ); - - addVkFormatDesc( R32_UINT, R32Uint, Invalid, UInt, Invalid, 1, 1, 4, ColorUInt32 ); - addVkFormatDesc( R32_SINT, R32Sint, Invalid, Int, Invalid, 1, 1, 4, ColorInt32 ); + addVkFormatDesc( B8G8R8_UINT, Invalid, Invalid, Invalid, Invalid, 1, 1, 3, ColorUInt8 ); + addVkFormatDesc( B8G8R8_SINT, Invalid, Invalid, Invalid, Invalid, 1, 1, 3, ColorInt8 ); + addVkFormatDesc( B8G8R8_SRGB, Invalid, Invalid, Invalid, Invalid, 1, 1, 3, ColorFloat ); + + addVkFormatDesc( R8G8B8A8_UNORM, RGBA8Unorm, Invalid, UChar4Normalized, Invalid, 1, 1, 4, ColorFloat ); + addVkFormatDesc( R8G8B8A8_SNORM, RGBA8Snorm, Invalid, Char4Normalized, Invalid, 1, 1, 4, ColorFloat ); + addVkFormatDesc( R8G8B8A8_USCALED, Invalid, Invalid, UChar4, Invalid, 1, 1, 4, ColorFloat ); + addVkFormatDesc( R8G8B8A8_SSCALED, Invalid, Invalid, Char4, Invalid, 1, 1, 4, ColorFloat ); + addVkFormatDesc( R8G8B8A8_UINT, RGBA8Uint, Invalid, UChar4, Invalid, 1, 1, 4, ColorUInt8 ); + addVkFormatDesc( R8G8B8A8_SINT, RGBA8Sint, Invalid, Char4, Invalid, 1, 1, 
4, ColorInt8 ); + addVkFormatDesc( R8G8B8A8_SRGB, RGBA8Unorm_sRGB, Invalid, UChar4Normalized, Invalid, 1, 1, 4, ColorFloat ); + + addVkFormatDesc( B8G8R8A8_UNORM, BGRA8Unorm, Invalid, UChar4Normalized_BGRA, Invalid, 1, 1, 4, ColorFloat ); + addVkFormatDesc( B8G8R8A8_SNORM, Invalid, Invalid, Invalid, Invalid, 1, 1, 4, ColorFloat ); + addVkFormatDesc( B8G8R8A8_USCALED, Invalid, Invalid, Invalid, Invalid, 1, 1, 4, ColorFloat ); + addVkFormatDesc( B8G8R8A8_SSCALED, Invalid, Invalid, Invalid, Invalid, 1, 1, 4, ColorFloat ); + addVkFormatDesc( B8G8R8A8_UINT, Invalid, Invalid, Invalid, Invalid, 1, 1, 4, ColorUInt8 ); + addVkFormatDesc( B8G8R8A8_SINT, Invalid, Invalid, Invalid, Invalid, 1, 1, 4, ColorInt8 ); + addVkFormatDesc( B8G8R8A8_SRGB, BGRA8Unorm_sRGB, Invalid, Invalid, Invalid, 1, 1, 4, ColorFloat ); + + addVkFormatDesc( A8B8G8R8_UNORM_PACK32, RGBA8Unorm, Invalid, UChar4Normalized, Invalid, 1, 1, 4, ColorFloat ); + addVkFormatDesc( A8B8G8R8_SNORM_PACK32, RGBA8Snorm, Invalid, Char4Normalized, Invalid, 1, 1, 4, ColorFloat ); + addVkFormatDesc( A8B8G8R8_USCALED_PACK32, Invalid, Invalid, UChar4, Invalid, 1, 1, 4, ColorFloat ); + addVkFormatDesc( A8B8G8R8_SSCALED_PACK32, Invalid, Invalid, Char4, Invalid, 1, 1, 4, ColorFloat ); + addVkFormatDesc( A8B8G8R8_UINT_PACK32, RGBA8Uint, Invalid, UChar4, Invalid, 1, 1, 4, ColorUInt8 ); + addVkFormatDesc( A8B8G8R8_SINT_PACK32, RGBA8Sint, Invalid, Char4, Invalid, 1, 1, 4, ColorInt8 ); + addVkFormatDesc( A8B8G8R8_SRGB_PACK32, RGBA8Unorm_sRGB, Invalid, UChar4Normalized, Invalid, 1, 1, 4, ColorFloat ); + + addVkFormatDesc( A2R10G10B10_UNORM_PACK32, BGR10A2Unorm, Invalid, Invalid, Invalid, 1, 1, 4, ColorFloat ); + addVkFormatDesc( A2R10G10B10_SNORM_PACK32, Invalid, Invalid, Invalid, Invalid, 1, 1, 4, ColorFloat ); + addVkFormatDesc( A2R10G10B10_USCALED_PACK32, Invalid, Invalid, Invalid, Invalid, 1, 1, 4, ColorFloat ); + addVkFormatDesc( A2R10G10B10_SSCALED_PACK32, Invalid, Invalid, Invalid, Invalid, 1, 1, 4, ColorFloat ); + addVkFormatDesc( A2R10G10B10_UINT_PACK32, Invalid, Invalid, Invalid, Invalid, 1, 1, 4, ColorUInt16 ); + addVkFormatDesc( A2R10G10B10_SINT_PACK32, Invalid, Invalid, Invalid, Invalid, 1, 1, 4, ColorInt16 ); + + addVkFormatDesc( A2B10G10R10_UNORM_PACK32, RGB10A2Unorm, Invalid, UInt1010102Normalized, Invalid, 1, 1, 4, ColorFloat ); + addVkFormatDesc( A2B10G10R10_SNORM_PACK32, Invalid, Invalid, Int1010102Normalized, Invalid, 1, 1, 4, ColorFloat ); + addVkFormatDesc( A2B10G10R10_USCALED_PACK32, Invalid, Invalid, Invalid, Invalid, 1, 1, 4, ColorFloat ); + addVkFormatDesc( A2B10G10R10_SSCALED_PACK32, Invalid, Invalid, Invalid, Invalid, 1, 1, 4, ColorFloat ); + addVkFormatDesc( A2B10G10R10_UINT_PACK32, RGB10A2Uint, Invalid, Invalid, Invalid, 1, 1, 4, ColorUInt16 ); + addVkFormatDesc( A2B10G10R10_SINT_PACK32, Invalid, Invalid, Invalid, Invalid, 1, 1, 4, ColorInt16 ); + + addVkFormatDesc( R16_UNORM, R16Unorm, Invalid, UShortNormalized, UShort2Normalized, 1, 1, 2, ColorFloat ); + addVkFormatDesc( R16_SNORM, R16Snorm, Invalid, ShortNormalized, Short2Normalized, 1, 1, 2, ColorFloat ); + addVkFormatDesc( R16_USCALED, Invalid, Invalid, UShort, UShort2, 1, 1, 2, ColorFloat ); + addVkFormatDesc( R16_SSCALED, Invalid, Invalid, Short, Short2, 1, 1, 2, ColorFloat ); + addVkFormatDesc( R16_UINT, R16Uint, Invalid, UShort, UShort2, 1, 1, 2, ColorUInt16 ); + addVkFormatDesc( R16_SINT, R16Sint, Invalid, Short, Short2, 1, 1, 2, ColorInt16 ); + addVkFormatDesc( R16_SFLOAT, R16Float, Invalid, Half, Half2, 1, 1, 2, ColorFloat ); + + addVkFormatDesc( R16G16_UNORM, 
RG16Unorm, Invalid, UShort2Normalized, Invalid, 1, 1, 4, ColorFloat ); + addVkFormatDesc( R16G16_SNORM, RG16Snorm, Invalid, Short2Normalized, Invalid, 1, 1, 4, ColorFloat ); + addVkFormatDesc( R16G16_USCALED, Invalid, Invalid, UShort2, Invalid, 1, 1, 4, ColorFloat ); + addVkFormatDesc( R16G16_SSCALED, Invalid, Invalid, Short2, Invalid, 1, 1, 4, ColorFloat ); + addVkFormatDesc( R16G16_UINT, RG16Uint, Invalid, UShort2, Invalid, 1, 1, 4, ColorUInt16 ); + addVkFormatDesc( R16G16_SINT, RG16Sint, Invalid, Short2, Invalid, 1, 1, 4, ColorInt16 ); + addVkFormatDesc( R16G16_SFLOAT, RG16Float, Invalid, Half2, Invalid, 1, 1, 4, ColorFloat ); + + addVkFormatDesc( R16G16B16_UNORM, Invalid, Invalid, UShort3Normalized, Invalid, 1, 1, 6, ColorFloat ); + addVkFormatDesc( R16G16B16_SNORM, Invalid, Invalid, Short3Normalized, Invalid, 1, 1, 6, ColorFloat ); + addVkFormatDesc( R16G16B16_USCALED, Invalid, Invalid, UShort3, Invalid, 1, 1, 6, ColorFloat ); + addVkFormatDesc( R16G16B16_SSCALED, Invalid, Invalid, Short3, Invalid, 1, 1, 6, ColorFloat ); + addVkFormatDesc( R16G16B16_UINT, Invalid, Invalid, UShort3, Invalid, 1, 1, 6, ColorUInt16 ); + addVkFormatDesc( R16G16B16_SINT, Invalid, Invalid, Short3, Invalid, 1, 1, 6, ColorInt16 ); + addVkFormatDesc( R16G16B16_SFLOAT, Invalid, Invalid, Half3, Invalid, 1, 1, 6, ColorFloat ); + + addVkFormatDesc( R16G16B16A16_UNORM, RGBA16Unorm, Invalid, UShort4Normalized, Invalid, 1, 1, 8, ColorFloat ); + addVkFormatDesc( R16G16B16A16_SNORM, RGBA16Snorm, Invalid, Short4Normalized, Invalid, 1, 1, 8, ColorFloat ); + addVkFormatDesc( R16G16B16A16_USCALED, Invalid, Invalid, UShort4, Invalid, 1, 1, 8, ColorFloat ); + addVkFormatDesc( R16G16B16A16_SSCALED, Invalid, Invalid, Short4, Invalid, 1, 1, 8, ColorFloat ); + addVkFormatDesc( R16G16B16A16_UINT, RGBA16Uint, Invalid, UShort4, Invalid, 1, 1, 8, ColorUInt16 ); + addVkFormatDesc( R16G16B16A16_SINT, RGBA16Sint, Invalid, Short4, Invalid, 1, 1, 8, ColorInt16 ); + addVkFormatDesc( R16G16B16A16_SFLOAT, RGBA16Float, Invalid, Half4, Invalid, 1, 1, 8, ColorFloat ); + + addVkFormatDesc( R32_UINT, R32Uint, Invalid, UInt, Invalid, 1, 1, 4, ColorUInt32 ); + addVkFormatDesc( R32_SINT, R32Sint, Invalid, Int, Invalid, 1, 1, 4, ColorInt32 ); addVkFormatDesc( R32_SFLOAT, R32Float, Invalid, Float, Invalid, 1, 1, 4, ColorFloat ); - addVkFormatDesc( R32G32_UINT, RG32Uint, Invalid, UInt2, Invalid, 1, 1, 8, ColorUInt32 ); - addVkFormatDesc( R32G32_SINT, RG32Sint, Invalid, Int2, Invalid, 1, 1, 8, ColorInt32 ); + addVkFormatDesc( R32G32_UINT, RG32Uint, Invalid, UInt2, Invalid, 1, 1, 8, ColorUInt32 ); + addVkFormatDesc( R32G32_SINT, RG32Sint, Invalid, Int2, Invalid, 1, 1, 8, ColorInt32 ); addVkFormatDesc( R32G32_SFLOAT, RG32Float, Invalid, Float2, Invalid, 1, 1, 8, ColorFloat ); - addVkFormatDesc( R32G32B32_UINT, Invalid, Invalid, UInt3, Invalid, 1, 1, 12, ColorUInt32 ); - addVkFormatDesc( R32G32B32_SINT, Invalid, Invalid, Int3, Invalid, 1, 1, 12, ColorInt32 ); + addVkFormatDesc( R32G32B32_UINT, Invalid, Invalid, UInt3, Invalid, 1, 1, 12, ColorUInt32 ); + addVkFormatDesc( R32G32B32_SINT, Invalid, Invalid, Int3, Invalid, 1, 1, 12, ColorInt32 ); addVkFormatDesc( R32G32B32_SFLOAT, Invalid, Invalid, Float3, Invalid, 1, 1, 12, ColorFloat ); - addVkFormatDesc( R32G32B32A32_UINT, RGBA32Uint, Invalid, UInt4, Invalid, 1, 1, 16, ColorUInt32 ); - addVkFormatDesc( R32G32B32A32_SINT, RGBA32Sint, Invalid, Int4, Invalid, 1, 1, 16, ColorInt32 ); + addVkFormatDesc( R32G32B32A32_UINT, RGBA32Uint, Invalid, UInt4, Invalid, 1, 1, 16, ColorUInt32 ); + addVkFormatDesc( 
R32G32B32A32_SINT, RGBA32Sint, Invalid, Int4, Invalid, 1, 1, 16, ColorInt32 ); addVkFormatDesc( R32G32B32A32_SFLOAT, RGBA32Float, Invalid, Float4, Invalid, 1, 1, 16, ColorFloat ); - addVkFormatDesc( R64_UINT, Invalid, Invalid, Invalid, Invalid, 1, 1, 8, ColorFloat ); - addVkFormatDesc( R64_SINT, Invalid, Invalid, Invalid, Invalid, 1, 1, 8, ColorFloat ); + addVkFormatDesc( R64_UINT, Invalid, Invalid, Invalid, Invalid, 1, 1, 8, ColorFloat ); + addVkFormatDesc( R64_SINT, Invalid, Invalid, Invalid, Invalid, 1, 1, 8, ColorFloat ); addVkFormatDesc( R64_SFLOAT, Invalid, Invalid, Invalid, Invalid, 1, 1, 8, ColorFloat ); - addVkFormatDesc( R64G64_UINT, Invalid, Invalid, Invalid, Invalid, 1, 1, 16, ColorFloat ); - addVkFormatDesc( R64G64_SINT, Invalid, Invalid, Invalid, Invalid, 1, 1, 16, ColorFloat ); + addVkFormatDesc( R64G64_UINT, Invalid, Invalid, Invalid, Invalid, 1, 1, 16, ColorFloat ); + addVkFormatDesc( R64G64_SINT, Invalid, Invalid, Invalid, Invalid, 1, 1, 16, ColorFloat ); addVkFormatDesc( R64G64_SFLOAT, Invalid, Invalid, Invalid, Invalid, 1, 1, 16, ColorFloat ); - addVkFormatDesc( R64G64B64_UINT, Invalid, Invalid, Invalid, Invalid, 1, 1, 24, ColorFloat ); - addVkFormatDesc( R64G64B64_SINT, Invalid, Invalid, Invalid, Invalid, 1, 1, 24, ColorFloat ); + addVkFormatDesc( R64G64B64_UINT, Invalid, Invalid, Invalid, Invalid, 1, 1, 24, ColorFloat ); + addVkFormatDesc( R64G64B64_SINT, Invalid, Invalid, Invalid, Invalid, 1, 1, 24, ColorFloat ); addVkFormatDesc( R64G64B64_SFLOAT, Invalid, Invalid, Invalid, Invalid, 1, 1, 24, ColorFloat ); - addVkFormatDesc( R64G64B64A64_UINT, Invalid, Invalid, Invalid, Invalid, 1, 1, 32, ColorFloat ); - addVkFormatDesc( R64G64B64A64_SINT, Invalid, Invalid, Invalid, Invalid, 1, 1, 32, ColorFloat ); + addVkFormatDesc( R64G64B64A64_UINT, Invalid, Invalid, Invalid, Invalid, 1, 1, 32, ColorFloat ); + addVkFormatDesc( R64G64B64A64_SINT, Invalid, Invalid, Invalid, Invalid, 1, 1, 32, ColorFloat ); addVkFormatDesc( R64G64B64A64_SFLOAT, Invalid, Invalid, Invalid, Invalid, 1, 1, 32, ColorFloat ); addVkFormatDesc( B10G11R11_UFLOAT_PACK32, RG11B10Float, Invalid, Invalid, Invalid, 1, 1, 4, ColorFloat ); - addVkFormatDesc( E5B9G9R9_UFLOAT_PACK32, RGB9E5Float, Invalid, Invalid, Invalid, 1, 1, 4, ColorFloat ); + addVkFormatDesc( E5B9G9R9_UFLOAT_PACK32, RGB9E5Float, Invalid, Invalid, Invalid, 1, 1, 4, ColorFloat ); - addVkFormatDesc( D32_SFLOAT, Depth32Float, Invalid, Invalid, Invalid, 1, 1, 4, DepthStencil ); + addVkFormatDesc( D32_SFLOAT, Depth32Float, Invalid, Invalid, Invalid, 1, 1, 4, DepthStencil ); addVkFormatDesc( D32_SFLOAT_S8_UINT, Depth32Float_Stencil8, Invalid, Invalid, Invalid, 1, 1, 5, DepthStencil ); addVkFormatDesc( S8_UINT, Stencil8, Invalid, Invalid, Invalid, 1, 1, 1, DepthStencil ); - addVkFormatDesc( D16_UNORM, Depth16Unorm, Depth32Float, Invalid, Invalid, 1, 1, 2, DepthStencil ); - addVkFormatDesc( D16_UNORM_S8_UINT, Invalid, Depth16Unorm_Stencil8, Invalid, Invalid, 1, 1, 3, DepthStencil ); + addVkFormatDesc( D16_UNORM, Depth16Unorm, Depth32Float, Invalid, Invalid, 1, 1, 2, DepthStencil ); + addVkFormatDesc( D16_UNORM_S8_UINT, Invalid, Depth16Unorm_Stencil8, Invalid, Invalid, 1, 1, 3, DepthStencil ); addVkFormatDesc( D24_UNORM_S8_UINT, Depth24Unorm_Stencil8, Depth32Float_Stencil8, Invalid, Invalid, 1, 1, 4, DepthStencil ); addVkFormatDesc( X8_D24_UNORM_PACK32, Invalid, Depth24Unorm_Stencil8, Invalid, Invalid, 1, 1, 4, DepthStencil ); - addVkFormatDesc( BC1_RGB_UNORM_BLOCK, BC1_RGBA, Invalid, Invalid, Invalid, 4, 4, 8, Compressed ); - addVkFormatDesc( 
BC1_RGB_SRGB_BLOCK, BC1_RGBA_sRGB, Invalid, Invalid, Invalid, 4, 4, 8, Compressed ); - addVkFormatDesc( BC1_RGBA_UNORM_BLOCK, BC1_RGBA, Invalid, Invalid, Invalid, 4, 4, 8, Compressed ); - addVkFormatDesc( BC1_RGBA_SRGB_BLOCK, BC1_RGBA_sRGB, Invalid, Invalid, Invalid, 4, 4, 8, Compressed ); + addVkFormatDesc( BC1_RGB_UNORM_BLOCK, BC1_RGBA, Invalid, Invalid, Invalid, 4, 4, 8, Compressed ); + addVkFormatDesc( BC1_RGB_SRGB_BLOCK, BC1_RGBA_sRGB, Invalid, Invalid, Invalid, 4, 4, 8, Compressed ); + addVkFormatDesc( BC1_RGBA_UNORM_BLOCK, BC1_RGBA, Invalid, Invalid, Invalid, 4, 4, 8, Compressed ); + addVkFormatDesc( BC1_RGBA_SRGB_BLOCK, BC1_RGBA_sRGB, Invalid, Invalid, Invalid, 4, 4, 8, Compressed ); - addVkFormatDesc( BC2_UNORM_BLOCK, BC2_RGBA, Invalid, Invalid, Invalid, 4, 4, 16, Compressed ); - addVkFormatDesc( BC2_SRGB_BLOCK, BC2_RGBA_sRGB, Invalid, Invalid, Invalid, 4, 4, 16, Compressed ); + addVkFormatDesc( BC2_UNORM_BLOCK, BC2_RGBA, Invalid, Invalid, Invalid, 4, 4, 16, Compressed ); + addVkFormatDesc( BC2_SRGB_BLOCK, BC2_RGBA_sRGB, Invalid, Invalid, Invalid, 4, 4, 16, Compressed ); - addVkFormatDesc( BC3_UNORM_BLOCK, BC3_RGBA, Invalid, Invalid, Invalid, 4, 4, 16, Compressed ); - addVkFormatDesc( BC3_SRGB_BLOCK, BC3_RGBA_sRGB, Invalid, Invalid, Invalid, 4, 4, 16, Compressed ); + addVkFormatDesc( BC3_UNORM_BLOCK, BC3_RGBA, Invalid, Invalid, Invalid, 4, 4, 16, Compressed ); + addVkFormatDesc( BC3_SRGB_BLOCK, BC3_RGBA_sRGB, Invalid, Invalid, Invalid, 4, 4, 16, Compressed ); addVkFormatDesc( BC4_UNORM_BLOCK, BC4_RUnorm, Invalid, Invalid, Invalid, 4, 4, 8, Compressed ); addVkFormatDesc( BC4_SNORM_BLOCK, BC4_RSnorm, Invalid, Invalid, Invalid, 4, 4, 8, Compressed ); @@ -1007,18 +1007,18 @@ addVkFormatDesc( BC5_SNORM_BLOCK, BC5_RGSnorm, Invalid, Invalid, Invalid, 4, 4, 16, Compressed ); addVkFormatDesc( BC6H_UFLOAT_BLOCK, BC6H_RGBUfloat, Invalid, Invalid, Invalid, 4, 4, 16, Compressed ); - addVkFormatDesc( BC6H_SFLOAT_BLOCK, BC6H_RGBFloat, Invalid, Invalid, Invalid, 4, 4, 16, Compressed ); + addVkFormatDesc( BC6H_SFLOAT_BLOCK, BC6H_RGBFloat, Invalid, Invalid, Invalid, 4, 4, 16, Compressed ); - addVkFormatDesc( BC7_UNORM_BLOCK, BC7_RGBAUnorm, Invalid, Invalid, Invalid, 4, 4, 16, Compressed ); - addVkFormatDesc( BC7_SRGB_BLOCK, BC7_RGBAUnorm_sRGB, Invalid, Invalid, Invalid, 4, 4, 16, Compressed ); + addVkFormatDesc( BC7_UNORM_BLOCK, BC7_RGBAUnorm, Invalid, Invalid, Invalid, 4, 4, 16, Compressed ); + addVkFormatDesc( BC7_SRGB_BLOCK, BC7_RGBAUnorm_sRGB, Invalid, Invalid, Invalid, 4, 4, 16, Compressed ); - addVkFormatDesc( ETC2_R8G8B8_UNORM_BLOCK, ETC2_RGB8, Invalid, Invalid, Invalid, 4, 4, 8, Compressed ); - addVkFormatDesc( ETC2_R8G8B8_SRGB_BLOCK, ETC2_RGB8_sRGB, Invalid, Invalid, Invalid, 4, 4, 8, Compressed ); - addVkFormatDesc( ETC2_R8G8B8A1_UNORM_BLOCK, ETC2_RGB8A1, Invalid, Invalid, Invalid, 4, 4, 8, Compressed ); - addVkFormatDesc( ETC2_R8G8B8A1_SRGB_BLOCK, ETC2_RGB8A1_sRGB, Invalid, Invalid, Invalid, 4, 4, 8, Compressed ); + addVkFormatDesc( ETC2_R8G8B8_UNORM_BLOCK, ETC2_RGB8, Invalid, Invalid, Invalid, 4, 4, 8, Compressed ); + addVkFormatDesc( ETC2_R8G8B8_SRGB_BLOCK, ETC2_RGB8_sRGB, Invalid, Invalid, Invalid, 4, 4, 8, Compressed ); + addVkFormatDesc( ETC2_R8G8B8A1_UNORM_BLOCK, ETC2_RGB8A1, Invalid, Invalid, Invalid, 4, 4, 8, Compressed ); + addVkFormatDesc( ETC2_R8G8B8A1_SRGB_BLOCK, ETC2_RGB8A1_sRGB, Invalid, Invalid, Invalid, 4, 4, 8, Compressed ); - addVkFormatDesc( ETC2_R8G8B8A8_UNORM_BLOCK, EAC_RGBA8, Invalid, Invalid, Invalid, 4, 4, 16, Compressed ); - addVkFormatDesc( 
ETC2_R8G8B8A8_SRGB_BLOCK, EAC_RGBA8_sRGB, Invalid, Invalid, Invalid, 4, 4, 16, Compressed ); + addVkFormatDesc( ETC2_R8G8B8A8_UNORM_BLOCK, EAC_RGBA8, Invalid, Invalid, Invalid, 4, 4, 16, Compressed ); + addVkFormatDesc( ETC2_R8G8B8A8_SRGB_BLOCK, EAC_RGBA8_sRGB, Invalid, Invalid, Invalid, 4, 4, 16, Compressed ); addVkFormatDesc( EAC_R11_UNORM_BLOCK, EAC_R11Unorm, Invalid, Invalid, Invalid, 4, 4, 8, Compressed ); addVkFormatDesc( EAC_R11_SNORM_BLOCK, EAC_R11Snorm, Invalid, Invalid, Invalid, 4, 4, 8, Compressed ); @@ -1026,94 +1026,94 @@ addVkFormatDesc( EAC_R11G11_UNORM_BLOCK, EAC_RG11Unorm, Invalid, Invalid, Invalid, 4, 4, 16, Compressed ); addVkFormatDesc( EAC_R11G11_SNORM_BLOCK, EAC_RG11Snorm, Invalid, Invalid, Invalid, 4, 4, 16, Compressed ); - addVkFormatDesc( ASTC_4x4_UNORM_BLOCK, ASTC_4x4_LDR, Invalid, Invalid, Invalid, 4, 4, 16, Compressed ); - addVkFormatDesc( ASTC_4x4_SFLOAT_BLOCK_EXT, ASTC_4x4_HDR, Invalid, Invalid, Invalid, 4, 4, 16, Compressed ); - addVkFormatDesc( ASTC_4x4_SRGB_BLOCK, ASTC_4x4_sRGB, Invalid, Invalid, Invalid, 4, 4, 16, Compressed ); - addVkFormatDesc( ASTC_5x4_UNORM_BLOCK, ASTC_5x4_LDR, Invalid, Invalid, Invalid, 5, 4, 16, Compressed ); - addVkFormatDesc( ASTC_5x4_SFLOAT_BLOCK_EXT, ASTC_5x4_HDR, Invalid, Invalid, Invalid, 5, 4, 16, Compressed ); - addVkFormatDesc( ASTC_5x4_SRGB_BLOCK, ASTC_5x4_sRGB, Invalid, Invalid, Invalid, 5, 4, 16, Compressed ); - addVkFormatDesc( ASTC_5x5_UNORM_BLOCK, ASTC_5x5_LDR, Invalid, Invalid, Invalid, 5, 5, 16, Compressed ); - addVkFormatDesc( ASTC_5x5_SFLOAT_BLOCK_EXT, ASTC_5x5_HDR, Invalid, Invalid, Invalid, 5, 5, 16, Compressed ); - addVkFormatDesc( ASTC_5x5_SRGB_BLOCK, ASTC_5x5_sRGB, Invalid, Invalid, Invalid, 5, 5, 16, Compressed ); - addVkFormatDesc( ASTC_6x5_UNORM_BLOCK, ASTC_6x5_LDR, Invalid, Invalid, Invalid, 6, 5, 16, Compressed ); - addVkFormatDesc( ASTC_6x5_SFLOAT_BLOCK_EXT, ASTC_6x5_HDR, Invalid, Invalid, Invalid, 6, 5, 16, Compressed ); - addVkFormatDesc( ASTC_6x5_SRGB_BLOCK, ASTC_6x5_sRGB, Invalid, Invalid, Invalid, 6, 5, 16, Compressed ); - addVkFormatDesc( ASTC_6x6_UNORM_BLOCK, ASTC_6x6_LDR, Invalid, Invalid, Invalid, 6, 6, 16, Compressed ); - addVkFormatDesc( ASTC_6x6_SFLOAT_BLOCK_EXT, ASTC_6x6_HDR, Invalid, Invalid, Invalid, 6, 6, 16, Compressed ); - addVkFormatDesc( ASTC_6x6_SRGB_BLOCK, ASTC_6x6_sRGB, Invalid, Invalid, Invalid, 6, 6, 16, Compressed ); - addVkFormatDesc( ASTC_8x5_UNORM_BLOCK, ASTC_8x5_LDR, Invalid, Invalid, Invalid, 8, 5, 16, Compressed ); - addVkFormatDesc( ASTC_8x5_SFLOAT_BLOCK_EXT, ASTC_8x5_HDR, Invalid, Invalid, Invalid, 8, 5, 16, Compressed ); - addVkFormatDesc( ASTC_8x5_SRGB_BLOCK, ASTC_8x5_sRGB, Invalid, Invalid, Invalid, 8, 5, 16, Compressed ); - addVkFormatDesc( ASTC_8x6_UNORM_BLOCK, ASTC_8x6_LDR, Invalid, Invalid, Invalid, 8, 6, 16, Compressed ); - addVkFormatDesc( ASTC_8x6_SFLOAT_BLOCK_EXT, ASTC_8x6_HDR, Invalid, Invalid, Invalid, 8, 6, 16, Compressed ); - addVkFormatDesc( ASTC_8x6_SRGB_BLOCK, ASTC_8x6_sRGB, Invalid, Invalid, Invalid, 8, 6, 16, Compressed ); - addVkFormatDesc( ASTC_8x8_UNORM_BLOCK, ASTC_8x8_LDR, Invalid, Invalid, Invalid, 8, 8, 16, Compressed ); - addVkFormatDesc( ASTC_8x8_SFLOAT_BLOCK_EXT, ASTC_8x8_HDR, Invalid, Invalid, Invalid, 8, 8, 16, Compressed ); - addVkFormatDesc( ASTC_8x8_SRGB_BLOCK, ASTC_8x8_sRGB, Invalid, Invalid, Invalid, 8, 8, 16, Compressed ); - addVkFormatDesc( ASTC_10x5_UNORM_BLOCK, ASTC_10x5_LDR, Invalid, Invalid, Invalid, 10, 5, 16, Compressed ); - addVkFormatDesc( ASTC_10x5_SFLOAT_BLOCK_EXT, ASTC_10x5_HDR, Invalid, Invalid, Invalid, 10, 5, 16, 
Compressed ); - addVkFormatDesc( ASTC_10x5_SRGB_BLOCK, ASTC_10x5_sRGB, Invalid, Invalid, Invalid, 10, 5, 16, Compressed ); - addVkFormatDesc( ASTC_10x6_UNORM_BLOCK, ASTC_10x6_LDR, Invalid, Invalid, Invalid, 10, 6, 16, Compressed ); - addVkFormatDesc( ASTC_10x6_SFLOAT_BLOCK_EXT, ASTC_10x6_HDR, Invalid, Invalid, Invalid, 10, 6, 16, Compressed ); - addVkFormatDesc( ASTC_10x6_SRGB_BLOCK, ASTC_10x6_sRGB, Invalid, Invalid, Invalid, 10, 6, 16, Compressed ); - addVkFormatDesc( ASTC_10x8_UNORM_BLOCK, ASTC_10x8_LDR, Invalid, Invalid, Invalid, 10, 8, 16, Compressed ); - addVkFormatDesc( ASTC_10x8_SFLOAT_BLOCK_EXT, ASTC_10x8_HDR, Invalid, Invalid, Invalid, 10, 8, 16, Compressed ); - addVkFormatDesc( ASTC_10x8_SRGB_BLOCK, ASTC_10x8_sRGB, Invalid, Invalid, Invalid, 10, 8, 16, Compressed ); - addVkFormatDesc( ASTC_10x10_UNORM_BLOCK, ASTC_10x10_LDR, Invalid, Invalid, Invalid, 10, 10, 16, Compressed ); - addVkFormatDesc( ASTC_10x10_SFLOAT_BLOCK_EXT, ASTC_10x10_HDR, Invalid, Invalid, Invalid, 10, 10, 16, Compressed ); - addVkFormatDesc( ASTC_10x10_SRGB_BLOCK, ASTC_10x10_sRGB, Invalid, Invalid, Invalid, 10, 10, 16, Compressed ); - addVkFormatDesc( ASTC_12x10_UNORM_BLOCK, ASTC_12x10_LDR, Invalid, Invalid, Invalid, 12, 10, 16, Compressed ); - addVkFormatDesc( ASTC_12x10_SFLOAT_BLOCK_EXT, ASTC_12x10_HDR, Invalid, Invalid, Invalid, 12, 10, 16, Compressed ); - addVkFormatDesc( ASTC_12x10_SRGB_BLOCK, ASTC_12x10_sRGB, Invalid, Invalid, Invalid, 12, 10, 16, Compressed ); - addVkFormatDesc( ASTC_12x12_UNORM_BLOCK, ASTC_12x12_LDR, Invalid, Invalid, Invalid, 12, 12, 16, Compressed ); - addVkFormatDesc( ASTC_12x12_SFLOAT_BLOCK_EXT, ASTC_12x12_HDR, Invalid, Invalid, Invalid, 12, 12, 16, Compressed ); - addVkFormatDesc( ASTC_12x12_SRGB_BLOCK, ASTC_12x12_sRGB, Invalid, Invalid, Invalid, 12, 12, 16, Compressed ); + addVkFormatDesc( ASTC_4x4_UNORM_BLOCK, ASTC_4x4_LDR, Invalid, Invalid, Invalid, 4, 4, 16, Compressed ); + addVkFormatDesc( ASTC_4x4_SFLOAT_BLOCK_EXT, ASTC_4x4_HDR, Invalid, Invalid, Invalid, 4, 4, 16, Compressed ); + addVkFormatDesc( ASTC_4x4_SRGB_BLOCK, ASTC_4x4_sRGB, Invalid, Invalid, Invalid, 4, 4, 16, Compressed ); + addVkFormatDesc( ASTC_5x4_UNORM_BLOCK, ASTC_5x4_LDR, Invalid, Invalid, Invalid, 5, 4, 16, Compressed ); + addVkFormatDesc( ASTC_5x4_SFLOAT_BLOCK_EXT, ASTC_5x4_HDR, Invalid, Invalid, Invalid, 5, 4, 16, Compressed ); + addVkFormatDesc( ASTC_5x4_SRGB_BLOCK, ASTC_5x4_sRGB, Invalid, Invalid, Invalid, 5, 4, 16, Compressed ); + addVkFormatDesc( ASTC_5x5_UNORM_BLOCK, ASTC_5x5_LDR, Invalid, Invalid, Invalid, 5, 5, 16, Compressed ); + addVkFormatDesc( ASTC_5x5_SFLOAT_BLOCK_EXT, ASTC_5x5_HDR, Invalid, Invalid, Invalid, 5, 5, 16, Compressed ); + addVkFormatDesc( ASTC_5x5_SRGB_BLOCK, ASTC_5x5_sRGB, Invalid, Invalid, Invalid, 5, 5, 16, Compressed ); + addVkFormatDesc( ASTC_6x5_UNORM_BLOCK, ASTC_6x5_LDR, Invalid, Invalid, Invalid, 6, 5, 16, Compressed ); + addVkFormatDesc( ASTC_6x5_SFLOAT_BLOCK_EXT, ASTC_6x5_HDR, Invalid, Invalid, Invalid, 6, 5, 16, Compressed ); + addVkFormatDesc( ASTC_6x5_SRGB_BLOCK, ASTC_6x5_sRGB, Invalid, Invalid, Invalid, 6, 5, 16, Compressed ); + addVkFormatDesc( ASTC_6x6_UNORM_BLOCK, ASTC_6x6_LDR, Invalid, Invalid, Invalid, 6, 6, 16, Compressed ); + addVkFormatDesc( ASTC_6x6_SFLOAT_BLOCK_EXT, ASTC_6x6_HDR, Invalid, Invalid, Invalid, 6, 6, 16, Compressed ); + addVkFormatDesc( ASTC_6x6_SRGB_BLOCK, ASTC_6x6_sRGB, Invalid, Invalid, Invalid, 6, 6, 16, Compressed ); + addVkFormatDesc( ASTC_8x5_UNORM_BLOCK, ASTC_8x5_LDR, Invalid, Invalid, Invalid, 8, 5, 16, Compressed ); + addVkFormatDesc( 
ASTC_8x5_SFLOAT_BLOCK_EXT, ASTC_8x5_HDR, Invalid, Invalid, Invalid, 8, 5, 16, Compressed ); + addVkFormatDesc( ASTC_8x5_SRGB_BLOCK, ASTC_8x5_sRGB, Invalid, Invalid, Invalid, 8, 5, 16, Compressed ); + addVkFormatDesc( ASTC_8x6_UNORM_BLOCK, ASTC_8x6_LDR, Invalid, Invalid, Invalid, 8, 6, 16, Compressed ); + addVkFormatDesc( ASTC_8x6_SFLOAT_BLOCK_EXT, ASTC_8x6_HDR, Invalid, Invalid, Invalid, 8, 6, 16, Compressed ); + addVkFormatDesc( ASTC_8x6_SRGB_BLOCK, ASTC_8x6_sRGB, Invalid, Invalid, Invalid, 8, 6, 16, Compressed ); + addVkFormatDesc( ASTC_8x8_UNORM_BLOCK, ASTC_8x8_LDR, Invalid, Invalid, Invalid, 8, 8, 16, Compressed ); + addVkFormatDesc( ASTC_8x8_SFLOAT_BLOCK_EXT, ASTC_8x8_HDR, Invalid, Invalid, Invalid, 8, 8, 16, Compressed ); + addVkFormatDesc( ASTC_8x8_SRGB_BLOCK, ASTC_8x8_sRGB, Invalid, Invalid, Invalid, 8, 8, 16, Compressed ); + addVkFormatDesc( ASTC_10x5_UNORM_BLOCK, ASTC_10x5_LDR, Invalid, Invalid, Invalid, 10, 5, 16, Compressed ); + addVkFormatDesc( ASTC_10x5_SFLOAT_BLOCK_EXT, ASTC_10x5_HDR, Invalid, Invalid, Invalid, 10, 5, 16, Compressed ); + addVkFormatDesc( ASTC_10x5_SRGB_BLOCK, ASTC_10x5_sRGB, Invalid, Invalid, Invalid, 10, 5, 16, Compressed ); + addVkFormatDesc( ASTC_10x6_UNORM_BLOCK, ASTC_10x6_LDR, Invalid, Invalid, Invalid, 10, 6, 16, Compressed ); + addVkFormatDesc( ASTC_10x6_SFLOAT_BLOCK_EXT, ASTC_10x6_HDR, Invalid, Invalid, Invalid, 10, 6, 16, Compressed ); + addVkFormatDesc( ASTC_10x6_SRGB_BLOCK, ASTC_10x6_sRGB, Invalid, Invalid, Invalid, 10, 6, 16, Compressed ); + addVkFormatDesc( ASTC_10x8_UNORM_BLOCK, ASTC_10x8_LDR, Invalid, Invalid, Invalid, 10, 8, 16, Compressed ); + addVkFormatDesc( ASTC_10x8_SFLOAT_BLOCK_EXT, ASTC_10x8_HDR, Invalid, Invalid, Invalid, 10, 8, 16, Compressed ); + addVkFormatDesc( ASTC_10x8_SRGB_BLOCK, ASTC_10x8_sRGB, Invalid, Invalid, Invalid, 10, 8, 16, Compressed ); + addVkFormatDesc( ASTC_10x10_UNORM_BLOCK, ASTC_10x10_LDR, Invalid, Invalid, Invalid, 10, 10, 16, Compressed ); + addVkFormatDesc( ASTC_10x10_SFLOAT_BLOCK_EXT, ASTC_10x10_HDR, Invalid, Invalid, Invalid, 10, 10, 16, Compressed ); + addVkFormatDesc( ASTC_10x10_SRGB_BLOCK, ASTC_10x10_sRGB, Invalid, Invalid, Invalid, 10, 10, 16, Compressed ); + addVkFormatDesc( ASTC_12x10_UNORM_BLOCK, ASTC_12x10_LDR, Invalid, Invalid, Invalid, 12, 10, 16, Compressed ); + addVkFormatDesc( ASTC_12x10_SFLOAT_BLOCK_EXT, ASTC_12x10_HDR, Invalid, Invalid, Invalid, 12, 10, 16, Compressed ); + addVkFormatDesc( ASTC_12x10_SRGB_BLOCK, ASTC_12x10_sRGB, Invalid, Invalid, Invalid, 12, 10, 16, Compressed ); + addVkFormatDesc( ASTC_12x12_UNORM_BLOCK, ASTC_12x12_LDR, Invalid, Invalid, Invalid, 12, 12, 16, Compressed ); + addVkFormatDesc( ASTC_12x12_SFLOAT_BLOCK_EXT, ASTC_12x12_HDR, Invalid, Invalid, Invalid, 12, 12, 16, Compressed ); + addVkFormatDesc( ASTC_12x12_SRGB_BLOCK, ASTC_12x12_sRGB, Invalid, Invalid, Invalid, 12, 12, 16, Compressed ); // Extension VK_IMG_format_pvrtc - addVkFormatDesc( PVRTC1_2BPP_UNORM_BLOCK_IMG, PVRTC_RGBA_2BPP, Invalid, Invalid, Invalid, 8, 4, 8, Compressed ); - addVkFormatDesc( PVRTC1_4BPP_UNORM_BLOCK_IMG, PVRTC_RGBA_4BPP, Invalid, Invalid, Invalid, 4, 4, 8, Compressed ); - addVkFormatDesc( PVRTC2_2BPP_UNORM_BLOCK_IMG, Invalid, Invalid, Invalid, Invalid, 8, 4, 8, Compressed ); - addVkFormatDesc( PVRTC2_4BPP_UNORM_BLOCK_IMG, Invalid, Invalid, Invalid, Invalid, 4, 4, 8, Compressed ); - addVkFormatDesc( PVRTC1_2BPP_SRGB_BLOCK_IMG, PVRTC_RGBA_2BPP_sRGB, Invalid, Invalid, Invalid, 8, 4, 8, Compressed ); - addVkFormatDesc( PVRTC1_4BPP_SRGB_BLOCK_IMG, PVRTC_RGBA_4BPP_sRGB, Invalid, Invalid, Invalid, 
4, 4, 8, Compressed ); - addVkFormatDesc( PVRTC2_2BPP_SRGB_BLOCK_IMG, Invalid, Invalid, Invalid, Invalid, 8, 4, 8, Compressed ); - addVkFormatDesc( PVRTC2_4BPP_SRGB_BLOCK_IMG, Invalid, Invalid, Invalid, Invalid, 4, 4, 8, Compressed ); + addVkFormatDesc( PVRTC1_2BPP_UNORM_BLOCK_IMG, PVRTC_RGBA_2BPP, Invalid, Invalid, Invalid, 8, 4, 8, Compressed ); + addVkFormatDesc( PVRTC1_4BPP_UNORM_BLOCK_IMG, PVRTC_RGBA_4BPP, Invalid, Invalid, Invalid, 4, 4, 8, Compressed ); + addVkFormatDesc( PVRTC2_2BPP_UNORM_BLOCK_IMG, Invalid, Invalid, Invalid, Invalid, 8, 4, 8, Compressed ); + addVkFormatDesc( PVRTC2_4BPP_UNORM_BLOCK_IMG, Invalid, Invalid, Invalid, Invalid, 4, 4, 8, Compressed ); + addVkFormatDesc( PVRTC1_2BPP_SRGB_BLOCK_IMG, PVRTC_RGBA_2BPP_sRGB, Invalid, Invalid, Invalid, 8, 4, 8, Compressed ); + addVkFormatDesc( PVRTC1_4BPP_SRGB_BLOCK_IMG, PVRTC_RGBA_4BPP_sRGB, Invalid, Invalid, Invalid, 4, 4, 8, Compressed ); + addVkFormatDesc( PVRTC2_2BPP_SRGB_BLOCK_IMG, Invalid, Invalid, Invalid, Invalid, 8, 4, 8, Compressed ); + addVkFormatDesc( PVRTC2_4BPP_SRGB_BLOCK_IMG, Invalid, Invalid, Invalid, Invalid, 4, 4, 8, Compressed ); // Extension VK_KHR_sampler_ycbcr_conversion - addVkFormatDescChromaSubsampling( G8B8G8R8_422_UNORM, GBGR422, 1, 8, 2, 1, 4 ); - addVkFormatDescChromaSubsampling( B8G8R8G8_422_UNORM, BGRG422, 1, 8, 2, 1, 4 ); - addVkFormatDescChromaSubsampling( G8_B8_R8_3PLANE_420_UNORM, Invalid, 3, 8, 2, 2, 6 ); - addVkFormatDescChromaSubsampling( G8_B8R8_2PLANE_420_UNORM, Invalid, 2, 8, 2, 2, 6 ); - addVkFormatDescChromaSubsampling( G8_B8_R8_3PLANE_422_UNORM, Invalid, 3, 8, 2, 1, 4 ); - addVkFormatDescChromaSubsampling( G8_B8R8_2PLANE_422_UNORM, Invalid, 2, 8, 2, 1, 4 ); - addVkFormatDescChromaSubsampling( G8_B8_R8_3PLANE_444_UNORM, Invalid, 3, 8, 1, 1, 3 ); - addVkFormatDescChromaSubsampling( R10X6_UNORM_PACK16, R16Unorm, 0, 10, 1, 1, 2 ); - addVkFormatDescChromaSubsampling( R10X6G10X6_UNORM_2PACK16, RG16Unorm, 0, 10, 1, 1, 4 ); - addVkFormatDescChromaSubsampling( R10X6G10X6B10X6A10X6_UNORM_4PACK16, RGBA16Unorm, 0, 10, 1, 1, 8 ); - addVkFormatDescChromaSubsampling( G10X6B10X6G10X6R10X6_422_UNORM_4PACK16, Invalid, 1, 10, 2, 1, 8 ); - addVkFormatDescChromaSubsampling( B10X6G10X6R10X6G10X6_422_UNORM_4PACK16, Invalid, 1, 10, 2, 1, 8 ); - addVkFormatDescChromaSubsampling( G10X6_B10X6_R10X6_3PLANE_420_UNORM_3PACK16, Invalid, 3, 10, 2, 2, 12 ); - addVkFormatDescChromaSubsampling( G10X6_B10X6R10X6_2PLANE_420_UNORM_3PACK16, Invalid, 2, 10, 2, 2, 12 ); - addVkFormatDescChromaSubsampling( G10X6_B10X6_R10X6_3PLANE_422_UNORM_3PACK16, Invalid, 3, 10, 2, 1, 8 ); - addVkFormatDescChromaSubsampling( G10X6_B10X6R10X6_2PLANE_422_UNORM_3PACK16, Invalid, 2, 10, 2, 1, 8 ); - addVkFormatDescChromaSubsampling( G10X6_B10X6_R10X6_3PLANE_444_UNORM_3PACK16, Invalid, 3, 10, 1, 1, 6 ); - addVkFormatDescChromaSubsampling( R12X4_UNORM_PACK16, R16Unorm, 0, 12, 1, 1, 2 ); - addVkFormatDescChromaSubsampling( R12X4G12X4_UNORM_2PACK16, RG16Unorm, 0, 12, 1, 1, 4 ); - addVkFormatDescChromaSubsampling( R12X4G12X4B12X4A12X4_UNORM_4PACK16, RGBA16Unorm, 0, 12, 1, 1, 8 ); - addVkFormatDescChromaSubsampling( G12X4B12X4G12X4R12X4_422_UNORM_4PACK16, Invalid, 1, 12, 2, 1, 8 ); - addVkFormatDescChromaSubsampling( B12X4G12X4R12X4G12X4_422_UNORM_4PACK16, Invalid, 1, 12, 2, 1, 8 ); - addVkFormatDescChromaSubsampling( G12X4_B12X4_R12X4_3PLANE_420_UNORM_3PACK16, Invalid, 3, 12, 2, 2, 12 ); - addVkFormatDescChromaSubsampling( G12X4_B12X4R12X4_2PLANE_420_UNORM_3PACK16, Invalid, 2, 12, 2, 2, 12 ); - addVkFormatDescChromaSubsampling( 
G12X4_B12X4_R12X4_3PLANE_422_UNORM_3PACK16, Invalid, 3, 12, 2, 1, 8 ); - addVkFormatDescChromaSubsampling( G12X4_B12X4R12X4_2PLANE_422_UNORM_3PACK16, Invalid, 2, 12, 2, 1, 8 ); - addVkFormatDescChromaSubsampling( G12X4_B12X4_R12X4_3PLANE_444_UNORM_3PACK16, Invalid, 3, 12, 1, 1, 6 ); - addVkFormatDescChromaSubsampling( G16B16G16R16_422_UNORM, Invalid, 1, 16, 2, 1, 8 ); - addVkFormatDescChromaSubsampling( B16G16R16G16_422_UNORM, Invalid, 1, 16, 2, 1, 8 ); - addVkFormatDescChromaSubsampling( G16_B16_R16_3PLANE_420_UNORM, Invalid, 3, 16, 2, 2, 12 ); - addVkFormatDescChromaSubsampling( G16_B16R16_2PLANE_420_UNORM, Invalid, 2, 16, 2, 2, 12 ); - addVkFormatDescChromaSubsampling( G16_B16_R16_3PLANE_422_UNORM, Invalid, 3, 16, 2, 1, 8 ); - addVkFormatDescChromaSubsampling( G16_B16R16_2PLANE_422_UNORM, Invalid, 2, 16, 2, 1, 8 ); - addVkFormatDescChromaSubsampling( G16_B16_R16_3PLANE_444_UNORM, Invalid, 3, 16, 1, 1, 6 ); + addVkFormatDescChromaSubsampling( G8B8G8R8_422_UNORM, GBGR422, 1, 8, 2, 1, 4 ); + addVkFormatDescChromaSubsampling( B8G8R8G8_422_UNORM, BGRG422, 1, 8, 2, 1, 4 ); + addVkFormatDescChromaSubsampling( G8_B8_R8_3PLANE_420_UNORM, Invalid, 3, 8, 2, 2, 6 ); + addVkFormatDescChromaSubsampling( G8_B8R8_2PLANE_420_UNORM, Invalid, 2, 8, 2, 2, 6 ); + addVkFormatDescChromaSubsampling( G8_B8_R8_3PLANE_422_UNORM, Invalid, 3, 8, 2, 1, 4 ); + addVkFormatDescChromaSubsampling( G8_B8R8_2PLANE_422_UNORM, Invalid, 2, 8, 2, 1, 4 ); + addVkFormatDescChromaSubsampling( G8_B8_R8_3PLANE_444_UNORM, Invalid, 3, 8, 1, 1, 3 ); + addVkFormatDescChromaSubsampling( R10X6_UNORM_PACK16, R16Unorm, 0, 10, 1, 1, 2 ); + addVkFormatDescChromaSubsampling( R10X6G10X6_UNORM_2PACK16, RG16Unorm, 0, 10, 1, 1, 4 ); + addVkFormatDescChromaSubsampling( R10X6G10X6B10X6A10X6_UNORM_4PACK16, RGBA16Unorm, 0, 10, 1, 1, 8 ); + addVkFormatDescChromaSubsampling( G10X6B10X6G10X6R10X6_422_UNORM_4PACK16, Invalid, 1, 10, 2, 1, 8 ); + addVkFormatDescChromaSubsampling( B10X6G10X6R10X6G10X6_422_UNORM_4PACK16, Invalid, 1, 10, 2, 1, 8 ); + addVkFormatDescChromaSubsampling( G10X6_B10X6_R10X6_3PLANE_420_UNORM_3PACK16, Invalid, 3, 10, 2, 2, 12 ); + addVkFormatDescChromaSubsampling( G10X6_B10X6R10X6_2PLANE_420_UNORM_3PACK16, Invalid, 2, 10, 2, 2, 12 ); + addVkFormatDescChromaSubsampling( G10X6_B10X6_R10X6_3PLANE_422_UNORM_3PACK16, Invalid, 3, 10, 2, 1, 8 ); + addVkFormatDescChromaSubsampling( G10X6_B10X6R10X6_2PLANE_422_UNORM_3PACK16, Invalid, 2, 10, 2, 1, 8 ); + addVkFormatDescChromaSubsampling( G10X6_B10X6_R10X6_3PLANE_444_UNORM_3PACK16, Invalid, 3, 10, 1, 1, 6 ); + addVkFormatDescChromaSubsampling( R12X4_UNORM_PACK16, R16Unorm, 0, 12, 1, 1, 2 ); + addVkFormatDescChromaSubsampling( R12X4G12X4_UNORM_2PACK16, RG16Unorm, 0, 12, 1, 1, 4 ); + addVkFormatDescChromaSubsampling( R12X4G12X4B12X4A12X4_UNORM_4PACK16, RGBA16Unorm, 0, 12, 1, 1, 8 ); + addVkFormatDescChromaSubsampling( G12X4B12X4G12X4R12X4_422_UNORM_4PACK16, Invalid, 1, 12, 2, 1, 8 ); + addVkFormatDescChromaSubsampling( B12X4G12X4R12X4G12X4_422_UNORM_4PACK16, Invalid, 1, 12, 2, 1, 8 ); + addVkFormatDescChromaSubsampling( G12X4_B12X4_R12X4_3PLANE_420_UNORM_3PACK16, Invalid, 3, 12, 2, 2, 12 ); + addVkFormatDescChromaSubsampling( G12X4_B12X4R12X4_2PLANE_420_UNORM_3PACK16, Invalid, 2, 12, 2, 2, 12 ); + addVkFormatDescChromaSubsampling( G12X4_B12X4_R12X4_3PLANE_422_UNORM_3PACK16, Invalid, 3, 12, 2, 1, 8 ); + addVkFormatDescChromaSubsampling( G12X4_B12X4R12X4_2PLANE_422_UNORM_3PACK16, Invalid, 2, 12, 2, 1, 8 ); + addVkFormatDescChromaSubsampling( G12X4_B12X4_R12X4_3PLANE_444_UNORM_3PACK16, Invalid, 3, 
12, 1, 1, 6 ); + addVkFormatDescChromaSubsampling( G16B16G16R16_422_UNORM, Invalid, 1, 16, 2, 1, 8 ); + addVkFormatDescChromaSubsampling( B16G16R16G16_422_UNORM, Invalid, 1, 16, 2, 1, 8 ); + addVkFormatDescChromaSubsampling( G16_B16_R16_3PLANE_420_UNORM, Invalid, 3, 16, 2, 2, 12 ); + addVkFormatDescChromaSubsampling( G16_B16R16_2PLANE_420_UNORM, Invalid, 2, 16, 2, 2, 12 ); + addVkFormatDescChromaSubsampling( G16_B16_R16_3PLANE_422_UNORM, Invalid, 3, 16, 2, 1, 8 ); + addVkFormatDescChromaSubsampling( G16_B16R16_2PLANE_422_UNORM, Invalid, 2, 16, 2, 1, 8 ); + addVkFormatDescChromaSubsampling( G16_B16_R16_3PLANE_444_UNORM, Invalid, 3, 16, 1, 1, 6 ); // When adding to this list, be sure to ensure _vkFormatCount is large enough for the format count } @@ -1144,147 +1144,147 @@ addMTLPixelFormatDesc ( Invalid, None, None, None ); // Ordinary 8-bit pixel formats - addMTLPixelFormatDesc ( A8Unorm, Color8, RF, RF ); - addMTLPixelFormatDesc ( R8Unorm, Color8, All, All ); + addMTLPixelFormatDesc ( A8Unorm, Color8, RF, RF ); + addMTLPixelFormatDesc ( R8Unorm, Color8, All, All ); addMTLPixelFormatDescSRGB( R8Unorm_sRGB, Color8, RFCMRB, None, R8Unorm ); - addMTLPixelFormatDesc ( R8Snorm, Color8, RFWCMB, All ); - addMTLPixelFormatDesc ( R8Uint, Color8, RWCM, RWCM ); - addMTLPixelFormatDesc ( R8Sint, Color8, RWCM, RWCM ); + addMTLPixelFormatDesc ( R8Snorm, Color8, RFWCMB, All ); + addMTLPixelFormatDesc ( R8Uint, Color8, RWCM, RWCM ); + addMTLPixelFormatDesc ( R8Sint, Color8, RWCM, RWCM ); // Ordinary 16-bit pixel formats addMTLPixelFormatDesc ( R16Unorm, Color16, RFWCMB, All ); addMTLPixelFormatDesc ( R16Snorm, Color16, RFWCMB, All ); - addMTLPixelFormatDesc ( R16Uint, Color16, RWCM, RWCM ); - addMTLPixelFormatDesc ( R16Sint, Color16, RWCM, RWCM ); - addMTLPixelFormatDesc ( R16Float, Color16, All, All ); + addMTLPixelFormatDesc ( R16Uint, Color16, RWCM, RWCM ); + addMTLPixelFormatDesc ( R16Sint, Color16, RWCM, RWCM ); + addMTLPixelFormatDesc ( R16Float, Color16, All, All ); - addMTLPixelFormatDesc ( RG8Unorm, Color16, All, All ); + addMTLPixelFormatDesc ( RG8Unorm, Color16, All, All ); addMTLPixelFormatDescSRGB( RG8Unorm_sRGB, Color16, RFCMRB, None, RG8Unorm ); - addMTLPixelFormatDesc ( RG8Snorm, Color16, RFWCMB, All ); - addMTLPixelFormatDesc ( RG8Uint, Color16, RWCM, RWCM ); - addMTLPixelFormatDesc ( RG8Sint, Color16, RWCM, RWCM ); + addMTLPixelFormatDesc ( RG8Snorm, Color16, RFWCMB, All ); + addMTLPixelFormatDesc ( RG8Uint, Color16, RWCM, RWCM ); + addMTLPixelFormatDesc ( RG8Sint, Color16, RWCM, RWCM ); // Packed 16-bit pixel formats addMTLPixelFormatDesc ( B5G6R5Unorm, Color16, RFCMRB, None ); addMTLPixelFormatDesc ( A1BGR5Unorm, Color16, RFCMRB, None ); - addMTLPixelFormatDesc ( ABGR4Unorm, Color16, RFCMRB, None ); + addMTLPixelFormatDesc ( ABGR4Unorm, Color16, RFCMRB, None ); addMTLPixelFormatDesc ( BGR5A1Unorm, Color16, RFCMRB, None ); // Ordinary 32-bit pixel formats - addMTLPixelFormatDesc ( R32Uint, Color32, RC, RWCM ); - addMTLPixelFormatDesc ( R32Sint, Color32, RC, RWCM ); + addMTLPixelFormatDesc ( R32Uint, Color32, RC, RWCM ); + addMTLPixelFormatDesc ( R32Sint, Color32, RC, RWCM ); addMTLPixelFormatDesc ( R32Float, Color32, RCMB, All ); addMTLPixelFormatDesc ( RG16Unorm, Color32, RFWCMB, All ); addMTLPixelFormatDesc ( RG16Snorm, Color32, RFWCMB, All ); - addMTLPixelFormatDesc ( RG16Uint, Color32, RWCM, RWCM ); - addMTLPixelFormatDesc ( RG16Sint, Color32, RWCM, RWCM ); - addMTLPixelFormatDesc ( RG16Float, Color32, All, All ); + addMTLPixelFormatDesc ( RG16Uint, Color32, RWCM, RWCM ); + 
addMTLPixelFormatDesc ( RG16Sint, Color32, RWCM, RWCM ); + addMTLPixelFormatDesc ( RG16Float, Color32, All, All ); - addMTLPixelFormatDesc ( RGBA8Unorm, Color32, All, All ); + addMTLPixelFormatDesc ( RGBA8Unorm, Color32, All, All ); addMTLPixelFormatDescSRGB( RGBA8Unorm_sRGB, Color32, RFCMRB, RFCMRB, RGBA8Unorm ); - addMTLPixelFormatDesc ( RGBA8Snorm, Color32, RFWCMB, All ); - addMTLPixelFormatDesc ( RGBA8Uint, Color32, RWCM, RWCM ); - addMTLPixelFormatDesc ( RGBA8Sint, Color32, RWCM, RWCM ); + addMTLPixelFormatDesc ( RGBA8Snorm, Color32, RFWCMB, All ); + addMTLPixelFormatDesc ( RGBA8Uint, Color32, RWCM, RWCM ); + addMTLPixelFormatDesc ( RGBA8Sint, Color32, RWCM, RWCM ); - addMTLPixelFormatDesc ( BGRA8Unorm, Color32, All, All ); + addMTLPixelFormatDesc ( BGRA8Unorm, Color32, All, All ); addMTLPixelFormatDescSRGB( BGRA8Unorm_sRGB, Color32, RFCMRB, RFCMRB, BGRA8Unorm ); // Packed 32-bit pixel formats addMTLPixelFormatDesc ( RGB10A2Unorm, Color32, RFCMRB, All ); - addMTLPixelFormatDesc ( RGB10A2Uint, Color32, RCM, RWCM ); + addMTLPixelFormatDesc ( RGB10A2Uint, Color32, RCM, RWCM ); addMTLPixelFormatDesc ( RG11B10Float, Color32, RFCMRB, All ); - addMTLPixelFormatDesc ( RGB9E5Float, Color32, RFCMRB, RF ); + addMTLPixelFormatDesc ( RGB9E5Float, Color32, RFCMRB, RF ); // Ordinary 64-bit pixel formats - addMTLPixelFormatDesc ( RG32Uint, Color64, RC, RWCM ); - addMTLPixelFormatDesc ( RG32Sint, Color64, RC, RWCM ); + addMTLPixelFormatDesc ( RG32Uint, Color64, RC, RWCM ); + addMTLPixelFormatDesc ( RG32Sint, Color64, RC, RWCM ); addMTLPixelFormatDesc ( RG32Float, Color64, RCB, All ); addMTLPixelFormatDesc ( RGBA16Unorm, Color64, RFWCMB, All ); addMTLPixelFormatDesc ( RGBA16Snorm, Color64, RFWCMB, All ); - addMTLPixelFormatDesc ( RGBA16Uint, Color64, RWCM, RWCM ); - addMTLPixelFormatDesc ( RGBA16Sint, Color64, RWCM, RWCM ); - addMTLPixelFormatDesc ( RGBA16Float, Color64, All, All ); + addMTLPixelFormatDesc ( RGBA16Uint, Color64, RWCM, RWCM ); + addMTLPixelFormatDesc ( RGBA16Sint, Color64, RWCM, RWCM ); + addMTLPixelFormatDesc ( RGBA16Float, Color64, All, All ); // Ordinary 128-bit pixel formats - addMTLPixelFormatDesc ( RGBA32Uint, Color128, RC, RWCM ); - addMTLPixelFormatDesc ( RGBA32Sint, Color128, RC, RWCM ); + addMTLPixelFormatDesc ( RGBA32Uint, Color128, RC, RWCM ); + addMTLPixelFormatDesc ( RGBA32Sint, Color128, RC, RWCM ); addMTLPixelFormatDesc ( RGBA32Float, Color128, RC, All ); // Compressed pixel formats - addMTLPixelFormatDesc ( PVRTC_RGBA_2BPP, PVRTC_RGBA_2BPP, RF, None ); + addMTLPixelFormatDesc ( PVRTC_RGBA_2BPP, PVRTC_RGBA_2BPP, RF, None ); addMTLPixelFormatDescSRGB( PVRTC_RGBA_2BPP_sRGB, PVRTC_RGBA_2BPP, RF, None, PVRTC_RGBA_2BPP ); - addMTLPixelFormatDesc ( PVRTC_RGBA_4BPP, PVRTC_RGBA_4BPP, RF, None ); + addMTLPixelFormatDesc ( PVRTC_RGBA_4BPP, PVRTC_RGBA_4BPP, RF, None ); addMTLPixelFormatDescSRGB( PVRTC_RGBA_4BPP_sRGB, PVRTC_RGBA_4BPP, RF, None, PVRTC_RGBA_4BPP ); - addMTLPixelFormatDesc ( ETC2_RGB8, ETC2_RGB8, RF, None ); - addMTLPixelFormatDescSRGB( ETC2_RGB8_sRGB, ETC2_RGB8, RF, None, ETC2_RGB8 ); - addMTLPixelFormatDesc ( ETC2_RGB8A1, ETC2_RGB8A1, RF, None ); + addMTLPixelFormatDesc ( ETC2_RGB8, ETC2_RGB8, RF, None ); + addMTLPixelFormatDescSRGB( ETC2_RGB8_sRGB, ETC2_RGB8, RF, None, ETC2_RGB8 ); + addMTLPixelFormatDesc ( ETC2_RGB8A1, ETC2_RGB8A1, RF, None ); addMTLPixelFormatDescSRGB( ETC2_RGB8A1_sRGB, ETC2_RGB8A1, RF, None, ETC2_RGB8A1 ); - addMTLPixelFormatDesc ( EAC_RGBA8, EAC_RGBA8, RF, None ); - addMTLPixelFormatDescSRGB( EAC_RGBA8_sRGB, EAC_RGBA8, RF, None, EAC_RGBA8 ); - 
addMTLPixelFormatDesc ( EAC_R11Unorm, EAC_R11, RF, None ); - addMTLPixelFormatDesc ( EAC_R11Snorm, EAC_R11, RF, None ); - addMTLPixelFormatDesc ( EAC_RG11Unorm, EAC_RG11, RF, None ); - addMTLPixelFormatDesc ( EAC_RG11Snorm, EAC_RG11, RF, None ); - - addMTLPixelFormatDesc ( ASTC_4x4_LDR, ASTC_4x4, None, None ); - addMTLPixelFormatDescSRGB( ASTC_4x4_sRGB, ASTC_4x4, None, None, ASTC_4x4_LDR ); - addMTLPixelFormatDesc ( ASTC_4x4_HDR, ASTC_4x4, None, None ); - addMTLPixelFormatDesc ( ASTC_5x4_LDR, ASTC_5x4, None, None ); - addMTLPixelFormatDescSRGB( ASTC_5x4_sRGB, ASTC_5x4, None, None, ASTC_5x4_LDR ); - addMTLPixelFormatDesc ( ASTC_5x4_HDR, ASTC_5x4, None, None ); - addMTLPixelFormatDesc ( ASTC_5x5_LDR, ASTC_5x5, None, None ); - addMTLPixelFormatDescSRGB( ASTC_5x5_sRGB, ASTC_5x5, None, None, ASTC_5x5_LDR ); - addMTLPixelFormatDesc ( ASTC_5x5_HDR, ASTC_5x5, None, None ); - addMTLPixelFormatDesc ( ASTC_6x5_LDR, ASTC_6x5, None, None ); - addMTLPixelFormatDescSRGB( ASTC_6x5_sRGB, ASTC_6x5, None, None, ASTC_6x5_LDR ); - addMTLPixelFormatDesc ( ASTC_6x5_HDR, ASTC_6x5, None, None ); - addMTLPixelFormatDesc ( ASTC_6x6_LDR, ASTC_6x6, None, None ); - addMTLPixelFormatDescSRGB( ASTC_6x6_sRGB, ASTC_6x6, None, None, ASTC_6x6_LDR ); - addMTLPixelFormatDesc ( ASTC_6x6_HDR, ASTC_6x6, None, None ); - addMTLPixelFormatDesc ( ASTC_8x5_LDR, ASTC_8x5, None, None ); - addMTLPixelFormatDescSRGB( ASTC_8x5_sRGB, ASTC_8x5, None, None, ASTC_8x5_LDR ); - addMTLPixelFormatDesc ( ASTC_8x5_HDR, ASTC_8x5, None, None ); - addMTLPixelFormatDesc ( ASTC_8x6_LDR, ASTC_8x6, None, None ); - addMTLPixelFormatDescSRGB( ASTC_8x6_sRGB, ASTC_8x6, None, None, ASTC_8x6_LDR ); - addMTLPixelFormatDesc ( ASTC_8x6_HDR, ASTC_8x6, None, None ); - addMTLPixelFormatDesc ( ASTC_8x8_LDR, ASTC_8x8, None, None ); - addMTLPixelFormatDescSRGB( ASTC_8x8_sRGB, ASTC_8x8, None, None, ASTC_8x8_LDR ); - addMTLPixelFormatDesc ( ASTC_8x8_HDR, ASTC_8x8, None, None ); - addMTLPixelFormatDesc ( ASTC_10x5_LDR, ASTC_10x5, None, None ); - addMTLPixelFormatDescSRGB( ASTC_10x5_sRGB, ASTC_10x5, None, None, ASTC_10x5_LDR ); - addMTLPixelFormatDesc ( ASTC_10x5_HDR, ASTC_10x5, None, None ); - addMTLPixelFormatDesc ( ASTC_10x6_LDR, ASTC_10x6, None, None ); - addMTLPixelFormatDescSRGB( ASTC_10x6_sRGB, ASTC_10x6, None, None, ASTC_10x6_LDR ); - addMTLPixelFormatDesc ( ASTC_10x6_HDR, ASTC_10x6, None, None ); - addMTLPixelFormatDesc ( ASTC_10x8_LDR, ASTC_10x8, None, None ); - addMTLPixelFormatDescSRGB( ASTC_10x8_sRGB, ASTC_10x8, None, None, ASTC_10x8_LDR ); - addMTLPixelFormatDesc ( ASTC_10x8_HDR, ASTC_10x8, None, None ); - addMTLPixelFormatDesc ( ASTC_10x10_LDR, ASTC_10x10, None, None ); + addMTLPixelFormatDesc ( EAC_RGBA8, EAC_RGBA8, RF, None ); + addMTLPixelFormatDescSRGB( EAC_RGBA8_sRGB, EAC_RGBA8, RF, None, EAC_RGBA8 ); + addMTLPixelFormatDesc ( EAC_R11Unorm, EAC_R11, RF, None ); + addMTLPixelFormatDesc ( EAC_R11Snorm, EAC_R11, RF, None ); + addMTLPixelFormatDesc ( EAC_RG11Unorm, EAC_RG11, RF, None ); + addMTLPixelFormatDesc ( EAC_RG11Snorm, EAC_RG11, RF, None ); + + addMTLPixelFormatDesc ( ASTC_4x4_LDR, ASTC_4x4, None, None ); + addMTLPixelFormatDescSRGB( ASTC_4x4_sRGB, ASTC_4x4, None, None, ASTC_4x4_LDR ); + addMTLPixelFormatDesc ( ASTC_4x4_HDR, ASTC_4x4, None, None ); + addMTLPixelFormatDesc ( ASTC_5x4_LDR, ASTC_5x4, None, None ); + addMTLPixelFormatDescSRGB( ASTC_5x4_sRGB, ASTC_5x4, None, None, ASTC_5x4_LDR ); + addMTLPixelFormatDesc ( ASTC_5x4_HDR, ASTC_5x4, None, None ); + addMTLPixelFormatDesc ( ASTC_5x5_LDR, ASTC_5x5, None, None ); + addMTLPixelFormatDescSRGB( 
ASTC_5x5_sRGB, ASTC_5x5, None, None, ASTC_5x5_LDR ); + addMTLPixelFormatDesc ( ASTC_5x5_HDR, ASTC_5x5, None, None ); + addMTLPixelFormatDesc ( ASTC_6x5_LDR, ASTC_6x5, None, None ); + addMTLPixelFormatDescSRGB( ASTC_6x5_sRGB, ASTC_6x5, None, None, ASTC_6x5_LDR ); + addMTLPixelFormatDesc ( ASTC_6x5_HDR, ASTC_6x5, None, None ); + addMTLPixelFormatDesc ( ASTC_6x6_LDR, ASTC_6x6, None, None ); + addMTLPixelFormatDescSRGB( ASTC_6x6_sRGB, ASTC_6x6, None, None, ASTC_6x6_LDR ); + addMTLPixelFormatDesc ( ASTC_6x6_HDR, ASTC_6x6, None, None ); + addMTLPixelFormatDesc ( ASTC_8x5_LDR, ASTC_8x5, None, None ); + addMTLPixelFormatDescSRGB( ASTC_8x5_sRGB, ASTC_8x5, None, None, ASTC_8x5_LDR ); + addMTLPixelFormatDesc ( ASTC_8x5_HDR, ASTC_8x5, None, None ); + addMTLPixelFormatDesc ( ASTC_8x6_LDR, ASTC_8x6, None, None ); + addMTLPixelFormatDescSRGB( ASTC_8x6_sRGB, ASTC_8x6, None, None, ASTC_8x6_LDR ); + addMTLPixelFormatDesc ( ASTC_8x6_HDR, ASTC_8x6, None, None ); + addMTLPixelFormatDesc ( ASTC_8x8_LDR, ASTC_8x8, None, None ); + addMTLPixelFormatDescSRGB( ASTC_8x8_sRGB, ASTC_8x8, None, None, ASTC_8x8_LDR ); + addMTLPixelFormatDesc ( ASTC_8x8_HDR, ASTC_8x8, None, None ); + addMTLPixelFormatDesc ( ASTC_10x5_LDR, ASTC_10x5, None, None ); + addMTLPixelFormatDescSRGB( ASTC_10x5_sRGB, ASTC_10x5, None, None, ASTC_10x5_LDR ); + addMTLPixelFormatDesc ( ASTC_10x5_HDR, ASTC_10x5, None, None ); + addMTLPixelFormatDesc ( ASTC_10x6_LDR, ASTC_10x6, None, None ); + addMTLPixelFormatDescSRGB( ASTC_10x6_sRGB, ASTC_10x6, None, None, ASTC_10x6_LDR ); + addMTLPixelFormatDesc ( ASTC_10x6_HDR, ASTC_10x6, None, None ); + addMTLPixelFormatDesc ( ASTC_10x8_LDR, ASTC_10x8, None, None ); + addMTLPixelFormatDescSRGB( ASTC_10x8_sRGB, ASTC_10x8, None, None, ASTC_10x8_LDR ); + addMTLPixelFormatDesc ( ASTC_10x8_HDR, ASTC_10x8, None, None ); + addMTLPixelFormatDesc ( ASTC_10x10_LDR, ASTC_10x10, None, None ); addMTLPixelFormatDescSRGB( ASTC_10x10_sRGB, ASTC_10x10, None, None, ASTC_10x10_LDR ); - addMTLPixelFormatDesc ( ASTC_10x10_HDR, ASTC_10x10, None, None ); - addMTLPixelFormatDesc ( ASTC_12x10_LDR, ASTC_12x10, None, None ); + addMTLPixelFormatDesc ( ASTC_10x10_HDR, ASTC_10x10, None, None ); + addMTLPixelFormatDesc ( ASTC_12x10_LDR, ASTC_12x10, None, None ); addMTLPixelFormatDescSRGB( ASTC_12x10_sRGB, ASTC_12x10, None, None, ASTC_12x10_LDR ); - addMTLPixelFormatDesc ( ASTC_12x10_HDR, ASTC_12x10, None, None ); - addMTLPixelFormatDesc ( ASTC_12x12_LDR, ASTC_12x12, None, None ); + addMTLPixelFormatDesc ( ASTC_12x10_HDR, ASTC_12x10, None, None ); + addMTLPixelFormatDesc ( ASTC_12x12_LDR, ASTC_12x12, None, None ); addMTLPixelFormatDescSRGB( ASTC_12x12_sRGB, ASTC_12x12, None, None, ASTC_12x12_LDR ); - addMTLPixelFormatDesc ( ASTC_12x12_HDR, ASTC_12x12, None, None ); - - addMTLPixelFormatDesc ( BC1_RGBA, BC1_RGBA, RF, RF ); - addMTLPixelFormatDescSRGB( BC1_RGBA_sRGB, BC1_RGBA, RF, RF, BC1_RGBA ); - addMTLPixelFormatDesc ( BC2_RGBA, BC2_RGBA, RF, RF ); - addMTLPixelFormatDescSRGB( BC2_RGBA_sRGB, BC2_RGBA, RF, RF, BC2_RGBA ); - addMTLPixelFormatDesc ( BC3_RGBA, BC3_RGBA, RF, RF ); - addMTLPixelFormatDescSRGB( BC3_RGBA_sRGB, BC3_RGBA, RF, RF, BC3_RGBA ); - addMTLPixelFormatDesc ( BC4_RUnorm, BC4_R, RF, RF ); - addMTLPixelFormatDesc ( BC4_RSnorm, BC4_R, RF, RF ); - addMTLPixelFormatDesc ( BC5_RGUnorm, BC5_RG, RF, RF ); - addMTLPixelFormatDesc ( BC5_RGSnorm, BC5_RG, RF, RF ); - addMTLPixelFormatDesc ( BC6H_RGBUfloat, BC6H_RGB, RF, RF ); - addMTLPixelFormatDesc ( BC6H_RGBFloat, BC6H_RGB, RF, RF ); - addMTLPixelFormatDesc ( BC7_RGBAUnorm, BC7_RGBA, RF, RF 
); + addMTLPixelFormatDesc ( ASTC_12x12_HDR, ASTC_12x12, None, None ); + + addMTLPixelFormatDesc ( BC1_RGBA, BC1_RGBA, RF, RF ); + addMTLPixelFormatDescSRGB( BC1_RGBA_sRGB, BC1_RGBA, RF, RF, BC1_RGBA ); + addMTLPixelFormatDesc ( BC2_RGBA, BC2_RGBA, RF, RF ); + addMTLPixelFormatDescSRGB( BC2_RGBA_sRGB, BC2_RGBA, RF, RF, BC2_RGBA ); + addMTLPixelFormatDesc ( BC3_RGBA, BC3_RGBA, RF, RF ); + addMTLPixelFormatDescSRGB( BC3_RGBA_sRGB, BC3_RGBA, RF, RF, BC3_RGBA ); + addMTLPixelFormatDesc ( BC4_RUnorm, BC4_R, RF, RF ); + addMTLPixelFormatDesc ( BC4_RSnorm, BC4_R, RF, RF ); + addMTLPixelFormatDesc ( BC5_RGUnorm, BC5_RG, RF, RF ); + addMTLPixelFormatDesc ( BC5_RGSnorm, BC5_RG, RF, RF ); + addMTLPixelFormatDesc ( BC6H_RGBUfloat, BC6H_RGB, RF, RF ); + addMTLPixelFormatDesc ( BC6H_RGBFloat, BC6H_RGB, RF, RF ); + addMTLPixelFormatDesc ( BC7_RGBAUnorm, BC7_RGBA, RF, RF ); addMTLPixelFormatDescSRGB( BC7_RGBAUnorm_sRGB, BC7_RGBA, RF, RF, BC7_RGBAUnorm ); // YUV pixel formats @@ -1292,20 +1292,20 @@ addMTLPixelFormatDesc ( BGRG422, None, RF, RF ); // Extended range and wide color pixel formats - addMTLPixelFormatDesc ( BGRA10_XR, BGRA10_XR, None, None ); + addMTLPixelFormatDesc ( BGRA10_XR, BGRA10_XR, None, None ); addMTLPixelFormatDescSRGB( BGRA10_XR_sRGB, BGRA10_XR, None, None, BGRA10_XR ); - addMTLPixelFormatDesc ( BGR10_XR, BGR10_XR, None, None ); - addMTLPixelFormatDescSRGB( BGR10_XR_sRGB, BGR10_XR, None, None, BGR10_XR ); - addMTLPixelFormatDesc ( BGR10A2Unorm, Color32, None, None ); + addMTLPixelFormatDesc ( BGR10_XR, BGR10_XR, None, None ); + addMTLPixelFormatDescSRGB( BGR10_XR_sRGB, BGR10_XR, None, None, BGR10_XR ); + addMTLPixelFormatDesc ( BGR10A2Unorm, Color32, None, None ); // Depth and stencil pixel formats - addMTLPixelFormatDesc ( Depth16Unorm, None, None, None ); - addMTLPixelFormatDesc ( Depth32Float, None, DRM, DRFMR ); - addMTLPixelFormatDesc ( Stencil8, None, DRM, DRMR ); + addMTLPixelFormatDesc ( Depth16Unorm, None, None, None ); + addMTLPixelFormatDesc ( Depth32Float, None, DRM, DRFMR ); + addMTLPixelFormatDesc ( Stencil8, None, DRM, DRMR ); addMTLPixelFormatDesc ( Depth24Unorm_Stencil8, Depth24_Stencil8, None, None ); - addMTLPixelFormatDesc ( Depth32Float_Stencil8, Depth32_Stencil8, DRM, DRFMR ); - addMTLPixelFormatDesc ( X24_Stencil8, Depth24_Stencil8, None, DRMR ); - addMTLPixelFormatDesc ( X32_Stencil8, Depth32_Stencil8, DRM, DRMR ); + addMTLPixelFormatDesc ( Depth32Float_Stencil8, Depth32_Stencil8, DRM, DRFMR ); + addMTLPixelFormatDesc ( X24_Stencil8, Depth24_Stencil8, None, DRMR ); + addMTLPixelFormatDesc ( X32_Stencil8, Depth32_Stencil8, DRM, DRMR ); // When adding to this list, be sure to ensure _mtlPixelFormatCount is large enough for the format count } @@ -1328,67 +1328,67 @@ addMTLVertexFormatDesc( Invalid, None, None ); addMTLVertexFormatDesc( UChar2Normalized, Vertex, Vertex ); - addMTLVertexFormatDesc( Char2Normalized, Vertex, Vertex ); - addMTLVertexFormatDesc( UChar2, Vertex, Vertex ); - addMTLVertexFormatDesc( Char2, Vertex, Vertex ); + addMTLVertexFormatDesc( Char2Normalized, Vertex, Vertex ); + addMTLVertexFormatDesc( UChar2, Vertex, Vertex ); + addMTLVertexFormatDesc( Char2, Vertex, Vertex ); addMTLVertexFormatDesc( UChar3Normalized, Vertex, Vertex ); - addMTLVertexFormatDesc( Char3Normalized, Vertex, Vertex ); - addMTLVertexFormatDesc( UChar3, Vertex, Vertex ); - addMTLVertexFormatDesc( Char3, Vertex, Vertex ); + addMTLVertexFormatDesc( Char3Normalized, Vertex, Vertex ); + addMTLVertexFormatDesc( UChar3, Vertex, Vertex ); + addMTLVertexFormatDesc( Char3, Vertex, 
Vertex ); addMTLVertexFormatDesc( UChar4Normalized, Vertex, Vertex ); - addMTLVertexFormatDesc( Char4Normalized, Vertex, Vertex ); - addMTLVertexFormatDesc( UChar4, Vertex, Vertex ); - addMTLVertexFormatDesc( Char4, Vertex, Vertex ); + addMTLVertexFormatDesc( Char4Normalized, Vertex, Vertex ); + addMTLVertexFormatDesc( UChar4, Vertex, Vertex ); + addMTLVertexFormatDesc( Char4, Vertex, Vertex ); addMTLVertexFormatDesc( UInt1010102Normalized, Vertex, Vertex ); - addMTLVertexFormatDesc( Int1010102Normalized, Vertex, Vertex ); + addMTLVertexFormatDesc( Int1010102Normalized, Vertex, Vertex ); addMTLVertexFormatDesc( UShort2Normalized, Vertex, Vertex ); - addMTLVertexFormatDesc( Short2Normalized, Vertex, Vertex ); - addMTLVertexFormatDesc( UShort2, Vertex, Vertex ); - addMTLVertexFormatDesc( Short2, Vertex, Vertex ); - addMTLVertexFormatDesc( Half2, Vertex, Vertex ); + addMTLVertexFormatDesc( Short2Normalized, Vertex, Vertex ); + addMTLVertexFormatDesc( UShort2, Vertex, Vertex ); + addMTLVertexFormatDesc( Short2, Vertex, Vertex ); + addMTLVertexFormatDesc( Half2, Vertex, Vertex ); addMTLVertexFormatDesc( UShort3Normalized, Vertex, Vertex ); - addMTLVertexFormatDesc( Short3Normalized, Vertex, Vertex ); - addMTLVertexFormatDesc( UShort3, Vertex, Vertex ); - addMTLVertexFormatDesc( Short3, Vertex, Vertex ); - addMTLVertexFormatDesc( Half3, Vertex, Vertex ); + addMTLVertexFormatDesc( Short3Normalized, Vertex, Vertex ); + addMTLVertexFormatDesc( UShort3, Vertex, Vertex ); + addMTLVertexFormatDesc( Short3, Vertex, Vertex ); + addMTLVertexFormatDesc( Half3, Vertex, Vertex ); addMTLVertexFormatDesc( UShort4Normalized, Vertex, Vertex ); - addMTLVertexFormatDesc( Short4Normalized, Vertex, Vertex ); - addMTLVertexFormatDesc( UShort4, Vertex, Vertex ); - addMTLVertexFormatDesc( Short4, Vertex, Vertex ); - addMTLVertexFormatDesc( Half4, Vertex, Vertex ); + addMTLVertexFormatDesc( Short4Normalized, Vertex, Vertex ); + addMTLVertexFormatDesc( UShort4, Vertex, Vertex ); + addMTLVertexFormatDesc( Short4, Vertex, Vertex ); + addMTLVertexFormatDesc( Half4, Vertex, Vertex ); - addMTLVertexFormatDesc( UInt, Vertex, Vertex ); - addMTLVertexFormatDesc( Int, Vertex, Vertex ); + addMTLVertexFormatDesc( UInt, Vertex, Vertex ); + addMTLVertexFormatDesc( Int, Vertex, Vertex ); addMTLVertexFormatDesc( Float, Vertex, Vertex ); - addMTLVertexFormatDesc( UInt2, Vertex, Vertex ); - addMTLVertexFormatDesc( Int2, Vertex, Vertex ); + addMTLVertexFormatDesc( UInt2, Vertex, Vertex ); + addMTLVertexFormatDesc( Int2, Vertex, Vertex ); addMTLVertexFormatDesc( Float2, Vertex, Vertex ); - addMTLVertexFormatDesc( UInt3, Vertex, Vertex ); - addMTLVertexFormatDesc( Int3, Vertex, Vertex ); + addMTLVertexFormatDesc( UInt3, Vertex, Vertex ); + addMTLVertexFormatDesc( Int3, Vertex, Vertex ); addMTLVertexFormatDesc( Float3, Vertex, Vertex ); - addMTLVertexFormatDesc( UInt4, Vertex, Vertex ); - addMTLVertexFormatDesc( Int4, Vertex, Vertex ); + addMTLVertexFormatDesc( UInt4, Vertex, Vertex ); + addMTLVertexFormatDesc( Int4, Vertex, Vertex ); addMTLVertexFormatDesc( Float4, Vertex, Vertex ); addMTLVertexFormatDesc( UCharNormalized, None, None ); - addMTLVertexFormatDesc( CharNormalized, None, None ); - addMTLVertexFormatDesc( UChar, None, None ); - addMTLVertexFormatDesc( Char, None, None ); + addMTLVertexFormatDesc( CharNormalized, None, None ); + addMTLVertexFormatDesc( UChar, None, None ); + addMTLVertexFormatDesc( Char, None, None ); addMTLVertexFormatDesc( UShortNormalized, None, None ); - addMTLVertexFormatDesc( ShortNormalized, None, None ); 
- addMTLVertexFormatDesc( UShort, None, None ); - addMTLVertexFormatDesc( Short, None, None ); - addMTLVertexFormatDesc( Half, None, None ); + addMTLVertexFormatDesc( ShortNormalized, None, None ); + addMTLVertexFormatDesc( UShort, None, None ); + addMTLVertexFormatDesc( Short, None, None ); + addMTLVertexFormatDesc( Half, None, None ); addMTLVertexFormatDesc( UChar4Normalized_BGRA, None, None ); From 12e1a36022f7ec3d9408d7151f5201d0b2188684 Mon Sep 17 00:00:00 2001 From: Evan Tang Date: Wed, 6 Sep 2023 00:21:29 -0500 Subject: [PATCH 2/7] Enable shader vertex loader --- .../project.pbxproj | 10 + ExternalRevisions/README.md | 1 + ExternalRevisions/SPIRV-Cross_repo_revision | 2 +- MoltenVK/MoltenVK/API/mvk_config.h | 11 + MoltenVK/MoltenVK/API/mvk_private_api.h | 1 + MoltenVK/MoltenVK/GPUObjects/MVKDevice.mm | 4 + MoltenVK/MoltenVK/GPUObjects/MVKPipeline.h | 1 + MoltenVK/MoltenVK/GPUObjects/MVKPipeline.mm | 239 ++++++++++++++---- .../MoltenVK/GPUObjects/MVKPixelFormats.mm | 24 +- .../MoltenVK/Utility/MVKConfigMembers.def | 1 + MoltenVK/MoltenVK/Utility/MVKEnvironment.h | 5 + .../SPIRVToMSLConverter.cpp | 52 +++- .../SPIRVToMSLConverter.h | 46 ++++ 13 files changed, 320 insertions(+), 77 deletions(-) diff --git a/ExternalDependencies.xcodeproj/project.pbxproj b/ExternalDependencies.xcodeproj/project.pbxproj index fdbcf75f2..efab79ec5 100644 --- a/ExternalDependencies.xcodeproj/project.pbxproj +++ b/ExternalDependencies.xcodeproj/project.pbxproj @@ -80,6 +80,10 @@ /* End PBXAggregateTarget section */ /* Begin PBXBuildFile section */ + 164DCE072AABA74B00E4B88F /* spirv_msl_vertex_loader.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 164DCE062AABA74B00E4B88F /* spirv_msl_vertex_loader.cpp */; }; + 164DCE082AABA74B00E4B88F /* spirv_msl_vertex_loader.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 164DCE062AABA74B00E4B88F /* spirv_msl_vertex_loader.cpp */; }; + 164DCE092AABA74B00E4B88F /* spirv_msl_vertex_loader.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 164DCE062AABA74B00E4B88F /* spirv_msl_vertex_loader.cpp */; }; + 164DCE0A2AABA74B00E4B88F /* spirv_msl_vertex_loader.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 164DCE062AABA74B00E4B88F /* spirv_msl_vertex_loader.cpp */; }; 2FEA0CDB2490322B00EEF3AD /* spirv_cfg.hpp in Headers */ = {isa = PBXBuildFile; fileRef = A976290921CC60BC00B52A68 /* spirv_cfg.hpp */; }; 2FEA0CDC2490322B00EEF3AD /* spirv_cross_parsed_ir.hpp in Headers */ = {isa = PBXBuildFile; fileRef = A976290821CC60BC00B52A68 /* spirv_cross_parsed_ir.hpp */; }; 2FEA0CDD2490322B00EEF3AD /* spirv_common.hpp in Headers */ = {isa = PBXBuildFile; fileRef = A976290721CC60BC00B52A68 /* spirv_common.hpp */; }; @@ -2367,6 +2371,7 @@ /* End PBXContainerItemProxy section */ /* Begin PBXFileReference section */ + 164DCE062AABA74B00E4B88F /* spirv_msl_vertex_loader.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = spirv_msl_vertex_loader.cpp; sourceTree = ""; }; 2FEA0B4A2490321700EEF3AD /* libglslang.a */ = {isa = PBXFileReference; explicitFileType = archive.ar; includeInIndex = 0; path = libglslang.a; sourceTree = BUILT_PRODUCTS_DIR; }; 2FEA0CD82490322100EEF3AD /* libSPIRVTools.a */ = {isa = PBXFileReference; explicitFileType = archive.ar; includeInIndex = 0; path = libSPIRVTools.a; sourceTree = BUILT_PRODUCTS_DIR; }; 2FEA0CEF2490322B00EEF3AD /* libSPIRVCross.a */ = {isa = PBXFileReference; explicitFileType = archive.ar; includeInIndex = 0; path = libSPIRVCross.a; sourceTree = BUILT_PRODUCTS_DIR; }; @@ -3019,6 +3024,7 @@ 
A976290321CC60BC00B52A68 /* spirv_cross.hpp */, A976290621CC60BC00B52A68 /* spirv_glsl.cpp */, A976290A21CC60BC00B52A68 /* spirv_glsl.hpp */, + 164DCE062AABA74B00E4B88F /* spirv_msl_vertex_loader.cpp */, A976290D21CC60BC00B52A68 /* spirv_msl.cpp */, A976290221CC60BC00B52A68 /* spirv_msl.hpp */, A976290421CC60BC00B52A68 /* spirv_parser.cpp */, @@ -5813,6 +5819,7 @@ 2FEA0CE72490322B00EEF3AD /* spirv_cross.cpp in Sources */, 2FEA0CE82490322B00EEF3AD /* spirv_reflect.cpp in Sources */, 2FEA0CE92490322B00EEF3AD /* spirv_glsl.cpp in Sources */, + 164DCE082AABA74B00E4B88F /* spirv_msl_vertex_loader.cpp in Sources */, 2FEA0CEA2490322B00EEF3AD /* spirv_cross_parsed_ir.cpp in Sources */, ); runOnlyForDeploymentPostprocessing = 0; @@ -6385,6 +6392,7 @@ A976291621CC60BC00B52A68 /* spirv_cross.cpp in Sources */, 450A4F68221C5A95007203D7 /* spirv_reflect.cpp in Sources */, A976291821CC60BC00B52A68 /* spirv_glsl.cpp in Sources */, + 164DCE0A2AABA74B00E4B88F /* spirv_msl_vertex_loader.cpp in Sources */, A976292821CC60BC00B52A68 /* spirv_cross_parsed_ir.cpp in Sources */, ); runOnlyForDeploymentPostprocessing = 0; @@ -6399,6 +6407,7 @@ A976291521CC60BC00B52A68 /* spirv_cross.cpp in Sources */, 450A4F67221C5A95007203D7 /* spirv_reflect.cpp in Sources */, A976291721CC60BC00B52A68 /* spirv_glsl.cpp in Sources */, + 164DCE072AABA74B00E4B88F /* spirv_msl_vertex_loader.cpp in Sources */, A976292721CC60BC00B52A68 /* spirv_cross_parsed_ir.cpp in Sources */, ); runOnlyForDeploymentPostprocessing = 0; @@ -6413,6 +6422,7 @@ DCFD7C9B2A45BA7D007BBBF7 /* spirv_cross.cpp in Sources */, DCFD7C9C2A45BA7D007BBBF7 /* spirv_reflect.cpp in Sources */, DCFD7C9D2A45BA7D007BBBF7 /* spirv_glsl.cpp in Sources */, + 164DCE092AABA74B00E4B88F /* spirv_msl_vertex_loader.cpp in Sources */, DCFD7C9E2A45BA7D007BBBF7 /* spirv_cross_parsed_ir.cpp in Sources */, ); runOnlyForDeploymentPostprocessing = 0; diff --git a/ExternalRevisions/README.md b/ExternalRevisions/README.md index 7a8e31a26..d66ea1b4d 100644 --- a/ExternalRevisions/README.md +++ b/ExternalRevisions/README.md @@ -133,6 +133,7 @@ if you encounter any building errors, you may need to re-add the *SPIRV-Cross* l spirv_cross.hpp spirv_glsl.cpp spirv_glsl.hpp + spirv_msl_vertex_loader.cpp spirv_msl.cpp spirv_msl.hpp spirv_parser.cpp diff --git a/ExternalRevisions/SPIRV-Cross_repo_revision b/ExternalRevisions/SPIRV-Cross_repo_revision index 590969e2f..d95819092 100644 --- a/ExternalRevisions/SPIRV-Cross_repo_revision +++ b/ExternalRevisions/SPIRV-Cross_repo_revision @@ -1 +1 @@ -bccaa94db814af33d8ef05c153e7c34d8bd4d685 +324b9393f2a40d4883d280669886e701f12e22a6 diff --git a/MoltenVK/MoltenVK/API/mvk_config.h b/MoltenVK/MoltenVK/API/mvk_config.h index f72ef7770..96a29cbfb 100644 --- a/MoltenVK/MoltenVK/API/mvk_config.h +++ b/MoltenVK/MoltenVK/API/mvk_config.h @@ -961,6 +961,17 @@ typedef struct { */ float timestampPeriodLowPassAlpha; + /** + * Force MoltenVK to always use shader code to load vertices (rather than translate to Metal's vertex descriptors). + * (Default is to only use shader vertex loaders in situations that Metal's descriptors don't support.) + * + * The initial value or this parameter is set by the + * MVK_CONFIG_FORCE_SHADER_VERTEX_LOADER + * runtime environment variable or MoltenVK compile-time build setting. + * If neither is set, this setting is disabled by default. 
+ */ + VkBool32 forceShaderVertexLoader; + } MVKConfiguration; diff --git a/MoltenVK/MoltenVK/API/mvk_private_api.h b/MoltenVK/MoltenVK/API/mvk_private_api.h index 8ed5b7547..ccfc4076a 100644 --- a/MoltenVK/MoltenVK/API/mvk_private_api.h +++ b/MoltenVK/MoltenVK/API/mvk_private_api.h @@ -149,6 +149,7 @@ typedef struct { VkBool32 programmableSamplePositions; /**< If true, programmable MSAA sample positions are supported. */ VkBool32 shaderBarycentricCoordinates; /**< If true, fragment shader barycentric coordinates are supported. */ MTLArgumentBuffersTier argumentBuffersTier; /**< The argument buffer tier available on this device, as a Metal enumeration. */ + VkBool32 pixelTypeLoads; /**< Supports loading from types like `device rgb9e5*` in MSL */ VkBool32 needsSampleDrefLodArrayWorkaround; /**< If true, sampling from arrayed depth images with explicit LoD is broken and needs a workaround. */ VkDeviceSize hostMemoryPageSize; /**< The size of a page of host memory on this platform. */ } MVKPhysicalDeviceMetalFeatures; diff --git a/MoltenVK/MoltenVK/GPUObjects/MVKDevice.mm b/MoltenVK/MoltenVK/GPUObjects/MVKDevice.mm index 20bad33e8..3a54d6afd 100644 --- a/MoltenVK/MoltenVK/GPUObjects/MVKDevice.mm +++ b/MoltenVK/MoltenVK/GPUObjects/MVKDevice.mm @@ -1861,6 +1861,7 @@ if (supportsMTLFeatureSet(iOS_GPUFamily4_v1)) { _metalFeatures.postDepthCoverage = true; _metalFeatures.nonUniformThreadgroups = true; + _metalFeatures.pixelTypeLoads = true; } if (supportsMTLFeatureSet(iOS_GPUFamily5_v1)) { @@ -2025,6 +2026,9 @@ _metalFeatures.renderLinearTextures = true; _metalFeatures.tileBasedDeferredRendering = true; + if (supportsMTLGPUFamily(Apple4)) { + _metalFeatures.pixelTypeLoads = true; + } #if MVK_XCODE_12 if (supportsMTLGPUFamily(Apple6)) { _metalFeatures.astcHDRTextures = true; diff --git a/MoltenVK/MoltenVK/GPUObjects/MVKPipeline.h b/MoltenVK/MoltenVK/GPUObjects/MVKPipeline.h index 505e894c1..5f03f5e68 100644 --- a/MoltenVK/MoltenVK/GPUObjects/MVKPipeline.h +++ b/MoltenVK/MoltenVK/GPUObjects/MVKPipeline.h @@ -336,6 +336,7 @@ class MVKGraphicsPipeline : public MVKPipeline { bool addTessCtlShaderToPipeline(MTLComputePipelineDescriptor* plDesc, const VkGraphicsPipelineCreateInfo* pCreateInfo, SPIRVToMSLConversionConfiguration& shaderConfig, SPIRVShaderOutputs& prevOutput, SPIRVShaderInputs& nextInputs, const VkPipelineShaderStageCreateInfo* pTessCtlSS, VkPipelineCreationFeedback* pTessCtlFB); bool addTessEvalShaderToPipeline(MTLRenderPipelineDescriptor* plDesc, const VkGraphicsPipelineCreateInfo* pCreateInfo, SPIRVToMSLConversionConfiguration& shaderConfig, SPIRVShaderOutputs& prevOutput, const VkPipelineShaderStageCreateInfo* pTessEvalSS, VkPipelineCreationFeedback* pTessEvalFB, const VkPipelineShaderStageCreateInfo*& pFragmentSS); bool addFragmentShaderToPipeline(MTLRenderPipelineDescriptor* plDesc, const VkGraphicsPipelineCreateInfo* pCreateInfo, SPIRVToMSLConversionConfiguration& shaderConfig, SPIRVShaderOutputs& prevOutput, const VkPipelineShaderStageCreateInfo* pFragmentSS, VkPipelineCreationFeedback* pFragmentFB); + bool canVertexInputUseMetalDescriptor(const VkPipelineVertexInputStateCreateInfo* pVI); template bool addVertexInputToPipeline(T* inputDesc, const VkPipelineVertexInputStateCreateInfo* pVI, const SPIRVToMSLConversionConfiguration& shaderConfig); void adjustVertexInputForMultiview(MTLVertexDescriptor* inputDesc, const VkPipelineVertexInputStateCreateInfo* pVI, uint32_t viewCount, uint32_t oldViewCount = 1); diff --git a/MoltenVK/MoltenVK/GPUObjects/MVKPipeline.mm 
b/MoltenVK/MoltenVK/GPUObjects/MVKPipeline.mm index 779eb75ac..9227c6ea6 100644 --- a/MoltenVK/MoltenVK/GPUObjects/MVKPipeline.mm +++ b/MoltenVK/MoltenVK/GPUObjects/MVKPipeline.mm @@ -746,7 +746,11 @@ // Vertex input // This needs to happen before compiling the fragment shader, or we'll lose information on vertex attributes. - if (!addVertexInputToPipeline(plDesc.vertexDescriptor, pCreateInfo->pVertexInputState, shaderConfig)) { return nil; } + if (!shaderConfig.options.shouldUseShaderVertexLoader) { + if (!addVertexInputToPipeline(plDesc.vertexDescriptor, pCreateInfo->pVertexInputState, shaderConfig)) { return nil; } + } + // Disable for non-vertex shaders + shaderConfig.options.shouldUseShaderVertexLoader = false; // Fragment shader - only add if rasterization is enabled if (!addFragmentShaderToPipeline(plDesc, pCreateInfo, shaderConfig, vtxOutputs, pFragmentSS, pFragmentFB)) { return nil; } @@ -790,9 +794,11 @@ if (!addVertexShaderToPipeline(plDesc, pCreateInfo, shaderConfig, tcInputs, pVertexSS, pVertexFB, pVtxFunctions)) { return nil; } // Vertex input - plDesc.stageInputDescriptor = [MTLStageInputOutputDescriptor stageInputOutputDescriptor]; - if (!addVertexInputToPipeline(plDesc.stageInputDescriptor, pCreateInfo->pVertexInputState, shaderConfig)) { return nil; } - plDesc.stageInputDescriptor.indexBufferIndex = _indirectParamsIndex.stages[kMVKShaderStageVertex]; + if (!shaderConfig.options.shouldUseShaderVertexLoader) { + plDesc.stageInputDescriptor = [MTLStageInputOutputDescriptor stageInputOutputDescriptor]; + if (!addVertexInputToPipeline(plDesc.stageInputDescriptor, pCreateInfo->pVertexInputState, shaderConfig)) { return nil; } + plDesc.stageInputDescriptor.indexBufferIndex = _indirectParamsIndex.stages[kMVKShaderStageVertex]; + } plDesc.threadGroupSizeIsMultipleOfThreadExecutionWidth = YES; @@ -1022,7 +1028,7 @@ static MTLVertexFormat mvkAdjustFormatVectorToSize(MTLVertexFormat format, uint3 shaderConfig.options.mslOptions.view_mask_buffer_index = _viewRangeBufferIndex.stages[kMVKShaderStageVertex]; shaderConfig.options.mslOptions.capture_output_to_buffer = false; shaderConfig.options.mslOptions.disable_rasterization = !_isRasterizing; - addVertexInputToShaderConversionConfig(shaderConfig, pCreateInfo); + addVertexInputToShaderConversionConfig(shaderConfig, pCreateInfo); MVKMTLFunction func = getMTLFunction(shaderConfig, pVertexSS, pVertexFB, "Vertex"); id mtlFunc = func.getMTLFunction(); @@ -1087,7 +1093,7 @@ static MTLVertexFormat mvkAdjustFormatVectorToSize(MTLVertexFormat format, uint3 shaderConfig.options.mslOptions.capture_output_to_buffer = true; shaderConfig.options.mslOptions.vertex_for_tessellation = true; shaderConfig.options.mslOptions.disable_rasterization = true; - addVertexInputToShaderConversionConfig(shaderConfig, pCreateInfo); + addVertexInputToShaderConversionConfig(shaderConfig, pCreateInfo); addNextStageInputToShaderConversionConfig(shaderConfig, tcInputs); // We need to compile this function three times, with no indexing, 16-bit indices, and 32-bit indices. 
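For reference, the hunk that follows adds canVertexInputUseMetalDescriptor(), which rejects vertex-input layouts that MTLVertexDescriptor cannot express: strides that are not multiples of Metal's stride alignment, instance divisors of zero, attributes that extend past their binding's stride, unaligned attribute offsets, or formats with no MTLVertexFormat equivalent. A minimal, hypothetical layout of the kind that would now take the shader-vertex-loader path (this example is illustrative only and not part of the patch):

    #include <vulkan/vulkan.h>

    // Hypothetical example only: the 14-byte stride is not a multiple of Metal's 4-byte
    // stride alignment, and the second attribute (offset 12 + 4 bytes) runs past the
    // 14-byte stride, so the checks added below return false and the shader loader is used.
    VkVertexInputBindingDescription binding = {};
    binding.binding   = 0;
    binding.stride    = 14;
    binding.inputRate = VK_VERTEX_INPUT_RATE_VERTEX;

    VkVertexInputAttributeDescription attrs[2] = {};
    attrs[0] = { /*location*/ 0, /*binding*/ 0, VK_FORMAT_R32G32B32_SFLOAT, /*offset*/ 0 };
    attrs[1] = { /*location*/ 1, /*binding*/ 0, VK_FORMAT_R16G16_UNORM,     /*offset*/ 12 };

    VkPipelineVertexInputStateCreateInfo vi = { VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO };
    vi.vertexBindingDescriptionCount   = 1;
    vi.pVertexBindingDescriptions      = &binding;
    vi.vertexAttributeDescriptionCount = 2;
    vi.pVertexAttributeDescriptions    = attrs;
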
@@ -1311,6 +1317,72 @@ static MTLVertexFormat mvkAdjustFormatVectorToSize(MTLVertexFormat format, uint3 return true; } +bool MVKGraphicsPipeline::canVertexInputUseMetalDescriptor(const VkPipelineVertexInputStateCreateInfo* pVI) { + if (mvkConfig().forceShaderVertexLoader) { + return false; + } + MVKArrayRef divisors; + for (const auto* next = (VkBaseInStructure*)pVI->pNext; next; next = next->pNext) { + switch (next->sType) { + case VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_DIVISOR_STATE_CREATE_INFO_EXT: { + auto* pVIDS = reinterpret_cast(next); + divisors = MVKArrayRef(pVIDS->pVertexBindingDivisors, pVIDS->vertexBindingDivisorCount); + break; + } + default: + break; + } + } + MVKArrayRef attributes(pVI->pVertexAttributeDescriptions, pVI->vertexAttributeDescriptionCount); + struct StrideAndAlign { + uint32_t stride = 0; + uint32_t align = 0; + }; + MVKSmallVector strides; + for (const auto& attribute : attributes) { + if (strides.size() < attribute.binding + 1) + strides.resize(attribute.binding + 1); + const MSLFormatInfo& fmt = CompilerMSL::get_format_info(static_cast(attribute.format)); + strides[attribute.binding].align = std::max(strides[attribute.binding].align, fmt.vk_align()); + } + VkDeviceSize mtlVtxStrideAlignment = _device->_pMetalFeatures->vertexStrideAlignment; + for (const auto& binding : MVKArrayRef(pVI->pVertexBindingDescriptions, pVI->vertexBindingDescriptionCount)) { + if (binding.binding >= strides.size() || strides[binding.binding].align == 0) + continue; + strides[binding.binding].stride = binding.stride; + // Metal has stricter requirements on the alignment of vertex strides + if ((binding.stride % mtlVtxStrideAlignment) != 0) + return false; + // Metal doesn't support 0 divisors + if (binding.stride != 0) { + for (const auto& divisor : divisors) { + if (divisor.binding == binding.binding && divisor.divisor == 0) + return false; + } + } + } + for (const auto& attribute : attributes) { + const MSLFormatInfo& fmt = CompilerMSL::get_format_info(static_cast(attribute.format)); + StrideAndAlign stride = strides[attribute.binding]; + // Metal doesn't support strides past the end of the vertex + if (stride.stride != 0 && attribute.offset + fmt.size() > stride.stride) + return false; + // Metal requires 4-byte alignment of vertex buffer offsets + // Therefore an unaligned attribute offset will always produce an unaligned load, making this guaranteed UB + // (Vulkan lets you unalign both the attribute offset and vertex buffer offset as long as the load is aligned once you add them) + // The AMD driver in particular doesn't handle unaligned attribute offsets + if ((attribute.offset & (fmt.align() - 1)) != 0) + return false; + // Metal requires vertex buffer offsets to be aligned to this value + if (stride.align < mtlVtxStrideAlignment) + return false; + // Check if Metal supports the format at all + if (getPixelFormats()->getMTLVertexFormat(attribute.format) == MTLVertexFormatInvalid) + return false; + } + return true; +} + template bool MVKGraphicsPipeline::addVertexInputToPipeline(T* inputDesc, const VkPipelineVertexInputStateCreateInfo* pVI, @@ -1641,6 +1713,7 @@ static MTLVertexFormat mvkAdjustFormatVectorToSize(MTLVertexFormat format, uint3 shaderConfig.options.mslOptions.texel_buffer_texture_width = _device->_pMetalFeatures->maxTextureDimension; shaderConfig.options.mslOptions.r32ui_linear_texture_alignment = (uint32_t)_device->getVkFormatTexelBufferAlignment(VK_FORMAT_R32_UINT, this); shaderConfig.options.mslOptions.texture_buffer_native = 
_device->_pMetalFeatures->textureBuffers; + shaderConfig.options.mslOptions.use_pixel_type_loads = _device->_pMetalFeatures->pixelTypeLoads; bool useMetalArgBuff = isUsingMetalArgumentBuffers(); shaderConfig.options.mslOptions.argument_buffers = useMetalArgBuff; @@ -1711,6 +1784,7 @@ static MTLVertexFormat mvkAdjustFormatVectorToSize(MTLVertexFormat format, uint3 shaderConfig.options.tessPatchKind = reflectData.patchKind; shaderConfig.options.numTessControlPoints = reflectData.numControlPoints; + shaderConfig.options.shouldUseShaderVertexLoader = !canVertexInputUseMetalDescriptor(pCreateInfo->pVertexInputState); } uint32_t MVKGraphicsPipeline::getImplicitBufferIndex(MVKShaderStage stage, uint32_t bufferIndexOffset) { @@ -1760,53 +1834,89 @@ static MTLVertexFormat mvkAdjustFormatVectorToSize(MTLVertexFormat format, uint3 // Initializes the vertex attributes in a shader conversion configuration. void MVKGraphicsPipeline::addVertexInputToShaderConversionConfig(SPIRVToMSLConversionConfiguration& shaderConfig, const VkGraphicsPipelineCreateInfo* pCreateInfo) { - // Set the shader conversion config vertex attribute information - shaderConfig.shaderInputs.clear(); - uint32_t vaCnt = pCreateInfo->pVertexInputState->vertexAttributeDescriptionCount; - for (uint32_t vaIdx = 0; vaIdx < vaCnt; vaIdx++) { - const VkVertexInputAttributeDescription* pVKVA = &pCreateInfo->pVertexInputState->pVertexAttributeDescriptions[vaIdx]; - - // Set binding and offset from Vulkan vertex attribute - mvk::MSLShaderInput si; - si.shaderVar.location = pVKVA->location; - si.binding = pVKVA->binding; - - // Metal can't do signedness conversions on vertex buffers (rdar://45922847). If the shader - // and the vertex attribute have mismatched signedness, we have to fix the shader - // to match the vertex attribute. So tell SPIRV-Cross if we're expecting an unsigned format. - // Only do this if the attribute could be reasonably expected to fit in the shader's - // declared type. Programs that try to invoke undefined behavior are on their own. 
- switch (getPixelFormats()->getFormatType(pVKVA->format) ) { - case kMVKFormatColorUInt8: - si.shaderVar.format = MSL_VERTEX_FORMAT_UINT8; - break; + // Set the shader conversion config vertex attribute information + shaderConfig.shaderInputs.clear(); + shaderConfig.vertexAttributes.clear(); + shaderConfig.vertexAttributes.clear(); + const VkPipelineVertexInputStateCreateInfo* pVIS = pCreateInfo->pVertexInputState; + MVKArrayRef bindings(pVIS->pVertexBindingDescriptions, pVIS->vertexBindingDescriptionCount); + MVKArrayRef attributes(pVIS->pVertexAttributeDescriptions, pVIS->vertexAttributeDescriptionCount); + if (shaderConfig.options.shouldUseShaderVertexLoader) { + MVKArrayRef divisors; + for (const auto* next = (VkBaseInStructure*)pVIS->pNext; next; next = next->pNext) { + switch (next->sType) { + case VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_DIVISOR_STATE_CREATE_INFO_EXT: { + auto* pVIDS = reinterpret_cast(next); + divisors = MVKArrayRef(pVIDS->pVertexBindingDivisors, pVIDS->vertexBindingDivisorCount); + break; + } + default: + break; + } + } + for (const VkVertexInputBindingDescription& vkBinding : bindings) { + mvk::MSLVertexBinding mslBinding; + mslBinding.binding.binding = getMetalBufferIndexForVertexAttributeBinding(vkBinding.binding); + mslBinding.binding.stride = vkBinding.stride; + mslBinding.binding.rate = static_cast(vkBinding.inputRate); + for (const auto& divisor : divisors) { + if (divisor.binding == vkBinding.binding) + mslBinding.binding.divisor = divisor.divisor; + } + shaderConfig.vertexBindings.push_back(mslBinding); + } + for (const VkVertexInputAttributeDescription& vkAttr : attributes) { + mvk::MSLVertexAttribute mslAttr; + mslAttr.attribute.location = vkAttr.location; + mslAttr.attribute.binding = getMetalBufferIndexForVertexAttributeBinding(vkAttr.binding); + mslAttr.attribute.format = static_cast(vkAttr.format); + mslAttr.attribute.offset = vkAttr.offset; + shaderConfig.vertexAttributes.push_back(mslAttr); + } + } else { + for (const VkVertexInputAttributeDescription& attribute : attributes) { + // Set binding and offset from Vulkan vertex attribute + mvk::MSLShaderInput si; + si.shaderVar.location = attribute.location; + si.binding = attribute.binding; + + // Metal can't do signedness conversions on vertex buffers (rdar://45922847). If the shader + // and the vertex attribute have mismatched signedness, we have to fix the shader + // to match the vertex attribute. So tell SPIRV-Cross if we're expecting an unsigned format. + // Only do this if the attribute could be reasonably expected to fit in the shader's + // declared type. Programs that try to invoke undefined behavior are on their own. + switch (getPixelFormats()->getFormatType(attribute.format) ) { + case kMVKFormatColorUInt8: + si.shaderVar.format = MSL_VERTEX_FORMAT_UINT8; + break; - case kMVKFormatColorUInt16: - si.shaderVar.format = MSL_VERTEX_FORMAT_UINT16; - break; + case kMVKFormatColorUInt16: + si.shaderVar.format = MSL_VERTEX_FORMAT_UINT16; + break; - case kMVKFormatDepthStencil: - // Only some depth/stencil formats have unsigned components. - switch (pVKVA->format) { - case VK_FORMAT_S8_UINT: - case VK_FORMAT_D16_UNORM_S8_UINT: - case VK_FORMAT_D24_UNORM_S8_UINT: - case VK_FORMAT_D32_SFLOAT_S8_UINT: - si.shaderVar.format = MSL_VERTEX_FORMAT_UINT8; - break; + case kMVKFormatDepthStencil: + // Only some depth/stencil formats have unsigned components. 
+ switch (attribute.format) { + case VK_FORMAT_S8_UINT: + case VK_FORMAT_D16_UNORM_S8_UINT: + case VK_FORMAT_D24_UNORM_S8_UINT: + case VK_FORMAT_D32_SFLOAT_S8_UINT: + si.shaderVar.format = MSL_VERTEX_FORMAT_UINT8; + break; - default: - break; - } - break; + default: + break; + } + break; - default: - break; + default: + break; - } + } - shaderConfig.shaderInputs.push_back(si); - } + shaderConfig.shaderInputs.push_back(si); + } + } } // Initializes the shader outputs in a shader conversion config from the next stage input. @@ -1858,6 +1968,8 @@ static MTLVertexFormat mvkAdjustFormatVectorToSize(MTLVertexFormat format, uint3 SPIRVShaderOutputs& shaderOutputs) { // Set the shader conversion configuration input variable information shaderConfig.shaderInputs.clear(); + shaderConfig.vertexAttributes.clear(); + shaderConfig.vertexBindings.clear(); uint32_t siCnt = (uint32_t)shaderOutputs.size(); for (uint32_t siIdx = 0; siIdx < siCnt; siIdx++) { if (!shaderOutputs[siIdx].isUsed) { continue; } @@ -2472,6 +2584,7 @@ void serialize(Archive & archive, CompilerMSL::Options& opt) { opt.ios_use_simdgroup_functions, opt.emulate_subgroups, opt.vertex_index_type, + opt.use_pixel_type_loads, opt.force_sample_rate_shading, opt.manual_helper_invocation_updates, opt.check_discarded_frag_stores, @@ -2500,6 +2613,22 @@ void serialize(Archive & archive, MSLResourceBinding& rb) { rb.msl_sampler); } + template + void serialize(Archive & archive, MSLVertexBinding& vb) { + archive(vb.binding, + vb.stride, + vb.rate, + vb.divisor); + } + + template + void serialize(Archive & archive, MSLVertexAttribute& va) { + archive(va.location, + va.binding, + va.format, + va.offset); + } + template void serialize(Archive & archive, MSLConstexprSampler& cs) { archive(cs.coord, @@ -2556,7 +2685,8 @@ void serialize(Archive & archive, SPIRVToMSLConversionOptions& opt) { opt.entryPointStage, opt.tessPatchKind, opt.numTessControlPoints, - opt.shouldFlipVertexY); + opt.shouldFlipVertexY, + opt.shouldUseShaderVertexLoader); } template @@ -2574,6 +2704,17 @@ void serialize(Archive & archive, MSLResourceBinding& rb) { rb.outIsUsedByShader); } + template + void serialize(Archive & archive, MSLVertexAttribute& va) { + archive(va.attribute, + va.outIsUsedByShader); + } + + template + void serialize(Archive & archive, MSLVertexBinding& vb) { + archive(vb.binding); + } + template void serialize(Archive & archive, DescriptorBinding& db) { archive(db.stage, @@ -2587,6 +2728,8 @@ void serialize(Archive & archive, SPIRVToMSLConversionConfiguration& ctx) { archive(ctx.options, ctx.shaderInputs, ctx.shaderOutputs, + ctx.vertexAttributes, + ctx.vertexBindings, ctx.resourceBindings, ctx.discreteDescriptorSets); } diff --git a/MoltenVK/MoltenVK/GPUObjects/MVKPixelFormats.mm b/MoltenVK/MoltenVK/GPUObjects/MVKPixelFormats.mm index 9658d20f8..4fd26e8ed 100644 --- a/MoltenVK/MoltenVK/GPUObjects/MVKPixelFormats.mm +++ b/MoltenVK/MoltenVK/GPUObjects/MVKPixelFormats.mm @@ -475,29 +475,7 @@ } MTLVertexFormat MVKPixelFormats::getMTLVertexFormat(VkFormat vkFormat) { - auto& vkDesc = getVkFormatDesc(vkFormat); - MTLVertexFormat mtlVtxFmt = vkDesc.mtlVertexFormat; - - // If the MTLVertexFormat is not supported but VkFormat is valid, - // report an error, and possibly substitute a different MTLVertexFormat. 
- if ( !mtlVtxFmt && vkFormat ) { - string errMsg; - errMsg += "VkFormat "; - errMsg += vkDesc.name; - errMsg += " is not supported for vertex buffers on this device."; - - if (vkDesc.vertexIsSupportedOrSubstitutable()) { - mtlVtxFmt = vkDesc.mtlVertexFormatSubstitute; - - auto& vkDescSubs = getVkFormatDesc(getMTLVertexFormatDesc(mtlVtxFmt).vkFormat); - errMsg += " Using VkFormat "; - errMsg += vkDescSubs.name; - errMsg += " instead."; - } - MVKBaseObject::reportError(_physicalDevice, VK_ERROR_FORMAT_NOT_SUPPORTED, "%s", errMsg.c_str()); - } - - return mtlVtxFmt; + return getVkFormatDesc(vkFormat).mtlVertexFormat; } MTLClearColor MVKPixelFormats::getMTLClearColor(VkClearValue vkClearValue, VkFormat vkFormat) { diff --git a/MoltenVK/MoltenVK/Utility/MVKConfigMembers.def b/MoltenVK/MoltenVK/Utility/MVKConfigMembers.def index aff0cf33a..6eca5f04d 100644 --- a/MoltenVK/MoltenVK/Utility/MVKConfigMembers.def +++ b/MoltenVK/MoltenVK/Utility/MVKConfigMembers.def @@ -81,6 +81,7 @@ MVK_CONFIG_MEMBER(useMetalArgumentBuffers, MVKUseMetalArgumentBuf MVK_CONFIG_MEMBER(shaderSourceCompressionAlgorithm, MVKConfigCompressionAlgorithm, SHADER_COMPRESSION_ALGORITHM) MVK_CONFIG_MEMBER(shouldMaximizeConcurrentCompilation, VkBool32, SHOULD_MAXIMIZE_CONCURRENT_COMPILATION) MVK_CONFIG_MEMBER(timestampPeriodLowPassAlpha, float, TIMESTAMP_PERIOD_LOWPASS_ALPHA) +MVK_CONFIG_MEMBER(forceShaderVertexLoader, VkBool32, FORCE_SHADER_VERTEX_LOADER) #undef MVK_CONFIG_MEMBER #undef MVK_CONFIG_MEMBER_STRING diff --git a/MoltenVK/MoltenVK/Utility/MVKEnvironment.h b/MoltenVK/MoltenVK/Utility/MVKEnvironment.h index f6f1ae9ed..f9c9b8d1a 100644 --- a/MoltenVK/MoltenVK/Utility/MVKEnvironment.h +++ b/MoltenVK/MoltenVK/Utility/MVKEnvironment.h @@ -325,3 +325,8 @@ void mvkSetConfig(const MVKConfiguration& mvkConfig); #ifndef MVK_CONFIG_TIMESTAMP_PERIOD_LOWPASS_ALPHA # define MVK_CONFIG_TIMESTAMP_PERIOD_LOWPASS_ALPHA 0.05 #endif + +/** Always use shader code to load vertices (rather than MTLVertexDescriptors). Disabled by default. 
*/ +#ifndef MVK_CONFIG_FORCE_SHADER_VERTEX_LOADER +# define MVK_CONFIG_FORCE_SHADER_VERTEX_LOADER 0 +#endif diff --git a/MoltenVKShaderConverter/MoltenVKShaderConverter/SPIRVToMSLConverter.cpp b/MoltenVKShaderConverter/MoltenVKShaderConverter/SPIRVToMSLConverter.cpp index ced660aa8..47c03b661 100644 --- a/MoltenVKShaderConverter/MoltenVKShaderConverter/SPIRVToMSLConverter.cpp +++ b/MoltenVKShaderConverter/MoltenVKShaderConverter/SPIRVToMSLConverter.cpp @@ -53,6 +53,7 @@ MVK_PUBLIC_SYMBOL bool SPIRVToMSLConversionOptions::matches(const SPIRVToMSLConv if (tessPatchKind != other.tessPatchKind) { return false; } if (numTessControlPoints != other.numTessControlPoints) { return false; } if (shouldFlipVertexY != other.shouldFlipVertexY) { return false; } + if (shouldUseShaderVertexLoader != other.shouldUseShaderVertexLoader) { return false; } return true; } @@ -126,6 +127,14 @@ MVK_PUBLIC_SYMBOL mvk::MSLResourceBinding::MSLResourceBinding() { constExprSampler = SPIRV_CROSS_NAMESPACE::MSLConstexprSampler(); } +MVK_PUBLIC_SYMBOL bool mvk::MSLVertexAttribute::matches(const mvk::MSLVertexAttribute& other) const { + return memcmp(&attribute, &other.attribute, sizeof(attribute)) != 0; +} + +MVK_PUBLIC_SYMBOL bool mvk::MSLVertexBinding::matches(const mvk::MSLVertexBinding& other) const { + return memcmp(&binding, &other.binding, sizeof(binding)) != 0; +} + MVK_PUBLIC_SYMBOL bool mvk::DescriptorBinding::matches(const mvk::DescriptorBinding& other) const { if (stage != other.stage) { return false; } if (descriptorSet != other.descriptorSet) { return false; } @@ -181,6 +190,7 @@ MVK_PUBLIC_SYMBOL bool SPIRVToMSLConversionConfiguration::isResourceUsed(Executi } MVK_PUBLIC_SYMBOL void SPIRVToMSLConversionConfiguration::markAllInterfaceVarsAndResourcesUsed() { + for (auto& va : vertexAttributes) { va.outIsUsedByShader = true; } for (auto& si : shaderInputs) { si.outIsUsedByShader = true; } for (auto& so : shaderOutputs) { so.outIsUsedByShader = true; } for (auto& rb : resourceBindings) { rb.outIsUsedByShader = true; } @@ -193,7 +203,7 @@ MVK_PUBLIC_SYMBOL void SPIRVToMSLConversionConfiguration::markAllInterfaceVarsAn // and shaderInputs and shaderOutputs are populated before each stage, so neither needs to be filtered by stage here. 
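As a usage note for the configuration knob introduced above: forceShaderVertexLoader is seeded from the MVK_CONFIG_FORCE_SHADER_VERTEX_LOADER environment variable and is disabled by default. A rough sketch of how a test harness might opt in, assuming the variable is set before MoltenVK first reads its configuration (for example, before vkCreateInstance()); this snippet is illustrative only and not part of the patch:

    #include <cstdlib>

    // Hypothetical test-harness helper: force every pipeline onto the shader vertex
    // loader so the new path gets exercised even for Metal-expressible layouts.
    static void forceShaderVertexLoaderForTesting() {
        setenv("MVK_CONFIG_FORCE_SHADER_VERTEX_LOADER", "1", /*overwrite*/ 1);
    }
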
MVK_PUBLIC_SYMBOL bool SPIRVToMSLConversionConfiguration::matches(const SPIRVToMSLConversionConfiguration& other) const { - if ( !options.matches(other.options) ) { return false; } + if ( !options.matches(other.options) ) { return false; } for (const auto& si : shaderInputs) { if (si.outIsUsedByShader && !containsMatching(other.shaderInputs, si)) { return false; } @@ -203,11 +213,22 @@ MVK_PUBLIC_SYMBOL bool SPIRVToMSLConversionConfiguration::matches(const SPIRVToM if (so.outIsUsedByShader && !containsMatching(other.shaderOutputs, so)) { return false; } } - for (const auto& rb : resourceBindings) { - if (rb.resourceBinding.stage == options.entryPointStage && + uint32_t usedBindings = 0; + for (const auto& va : vertexAttributes) { + if (va.outIsUsedByShader && !containsMatching(other.vertexAttributes, va)) { return false; } + if (va.outIsUsedByShader) { usedBindings |= 1 << va.attribute.binding; } + } + + for (const auto& vb : vertexBindings) { + bool used = (usedBindings >> vb.binding.binding) & 1; + if (used && !containsMatching(other.vertexBindings, vb)) { return false; } + } + + for (const auto& rb : resourceBindings) { + if (rb.resourceBinding.stage == options.entryPointStage && rb.outIsUsedByShader && !containsMatching(other.resourceBindings, rb)) { return false; } - } + } for (const auto& db : dynamicBufferDescriptors) { if (db.stage == options.entryPointStage && @@ -218,12 +239,19 @@ MVK_PUBLIC_SYMBOL bool SPIRVToMSLConversionConfiguration::matches(const SPIRVToM if ( !contains(other.discreteDescriptorSets, dsIdx)) { return false; } } - return true; + return true; } MVK_PUBLIC_SYMBOL void SPIRVToMSLConversionConfiguration::alignWith(const SPIRVToMSLConversionConfiguration& srcContext) { + for (auto& va : vertexAttributes) { + va.outIsUsedByShader = false; + for (auto& srcVA : srcContext.vertexAttributes) { + if (va.matches(srcVA)) { va.outIsUsedByShader = srcVA.outIsUsedByShader; } + } + } + for (auto& si : shaderInputs) { si.outIsUsedByShader = false; for (auto& srcSI : srcContext.shaderInputs) { @@ -305,6 +333,15 @@ MVK_PUBLIC_SYMBOL bool SPIRVToMSLConverter::convert(SPIRVToMSLConversionConfigur pMSLCompiler->set_common_options(scOpts); // Add shader inputs and outputs + if (shaderConfig.options.shouldUseShaderVertexLoader) { + for (auto& va : shaderConfig.vertexAttributes) { + pMSLCompiler->add_shader_vertex_loader_attribute(va.attribute); + } + for (auto& vb : shaderConfig.vertexBindings) { + pMSLCompiler->add_shader_vertex_loader_binding(vb.binding); + } + } + for (auto& si : shaderConfig.shaderInputs) { pMSLCompiler->add_msl_shader_input(si.shaderVar); } @@ -379,6 +416,11 @@ MVK_PUBLIC_SYMBOL bool SPIRVToMSLConverter::convert(SPIRVToMSLConversionConfigur } } + if (shaderConfig.options.shouldUseShaderVertexLoader) { + for (auto& ctxVA : shaderConfig.vertexAttributes) { + ctxVA.outIsUsedByShader = pMSLCompiler->is_msl_shader_input_used(ctxVA.attribute.location); + } + } for (auto& ctxSI : shaderConfig.shaderInputs) { if (ctxSI.shaderVar.builtin != spv::BuiltInMax) { ctxSI.outIsUsedByShader = pMSLCompiler->has_active_builtin(ctxSI.shaderVar.builtin, spv::StorageClassInput); diff --git a/MoltenVKShaderConverter/MoltenVKShaderConverter/SPIRVToMSLConverter.h b/MoltenVKShaderConverter/MoltenVKShaderConverter/SPIRVToMSLConverter.h index 1789ee93c..08236c05a 100644 --- a/MoltenVKShaderConverter/MoltenVKShaderConverter/SPIRVToMSLConverter.h +++ b/MoltenVKShaderConverter/MoltenVKShaderConverter/SPIRVToMSLConverter.h @@ -43,6 +43,7 @@ namespace mvk { spv::ExecutionMode tessPatchKind = 
spv::ExecutionModeMax; uint32_t numTessControlPoints = 0; bool shouldFlipVertexY = true; + bool shouldUseShaderVertexLoader = false; /** * Returns whether the specified options match this one. @@ -86,6 +87,49 @@ namespace mvk { } MSLShaderInterfaceVariable, MSLShaderInput; + /** + * Defines a VK vertex attribute for use with in-shader vertex loading + * + * The outIsUsedByShader flag is set to true during conversion of SPIR-V to MSL if the shader + * makes use of this interface variable. This allows a pipeline to be optimized, and for two + * shader conversion configurations to be compared only against the attributes that are + * actually used by the shader. + * + * THIS STRUCT IS STREAMED OUT AS PART OF THE PIPELINE CACHE. + * CHANGES TO THIS STRUCT SHOULD BE CAPTURED IN THE STREAMING LOGIC OF THE PIPELINE CACHE. + */ + typedef struct MSLVertexAttribute { + SPIRV_CROSS_NAMESPACE::MSLVertexAttribute attribute; + bool outIsUsedByShader = false; + + /** + * Returns whether the specified resource binding match this one. + * It does if all corresponding elements except outIsUsedByShader are equal. + */ + bool matches(const MSLVertexAttribute& other) const; + } MSLVertexAttribute; + + /** + * Defines a VK vertex binding for use with in-shader vertex loading + * + * The outIsUsedByShader flag is set to true during conversion of SPIR-V to MSL if the shader + * makes use of this interface variable. This allows a pipeline to be optimized, and for two + * shader conversion configurations to be compared only against the attributes that are + * actually used by the shader. + * + * THIS STRUCT IS STREAMED OUT AS PART OF THE PIPELINE CACHE. + * CHANGES TO THIS STRUCT SHOULD BE CAPTURED IN THE STREAMING LOGIC OF THE PIPELINE CACHE. + */ + typedef struct MSLVertexBinding { + SPIRV_CROSS_NAMESPACE::MSLVertexBinding binding; + + /** + * Returns whether the specified resource binding match this one. + * It does if all corresponding elements except outIsUsedByShader are equal. + */ + bool matches(const MSLVertexBinding& other) const; + } MSLVertexBinding; + /** * Matches the binding index of a MSL resource for a binding within a descriptor set. 
* Taken together, the stage, desc_set and binding combine to form a reference to a resource @@ -148,6 +192,8 @@ namespace mvk { SPIRVToMSLConversionOptions options; std::vector shaderInputs; std::vector shaderOutputs; + std::vector vertexAttributes; + std::vector vertexBindings; std::vector resourceBindings; std::vector discreteDescriptorSets; std::vector dynamicBufferDescriptors; From 41e6c00d18ff1dc08a35d2aae9575e8cf904c3b6 Mon Sep 17 00:00:00 2001 From: Evan Tang Date: Mon, 11 Sep 2023 11:16:15 -0500 Subject: [PATCH 3/7] Remove obsolete vertex loader workarounds --- MoltenVK/MoltenVK/Commands/MVKCmdDraw.mm | 12 -- .../Commands/MVKCommandEncoderState.h | 3 - .../Commands/MVKCommandEncoderState.mm | 34 ----- MoltenVK/MoltenVK/GPUObjects/MVKPipeline.h | 9 -- MoltenVK/MoltenVK/GPUObjects/MVKPipeline.mm | 130 +----------------- 5 files changed, 3 insertions(+), 185 deletions(-) diff --git a/MoltenVK/MoltenVK/Commands/MVKCmdDraw.mm b/MoltenVK/MoltenVK/Commands/MVKCmdDraw.mm index 20d278151..bb7c4068c 100644 --- a/MoltenVK/MoltenVK/Commands/MVKCmdDraw.mm +++ b/MoltenVK/MoltenVK/Commands/MVKCmdDraw.mm @@ -191,9 +191,6 @@ atIndex: pipeline->getOutputBufferIndex().stages[kMVKShaderStageVertex]]; } [mtlTessCtlEncoder setStageInRegion: MTLRegionMake2D(_firstVertex, _firstInstance, _vertexCount, _instanceCount)]; - // If there are vertex bindings with a zero vertex divisor, I need to offset them by - // _firstInstance * stride, since that is the expected behaviour for a divisor of 0. - cmdEncoder->_graphicsResourcesState.offsetZeroDivisorVertexBuffers(stage, pipeline, _firstInstance); id vtxState = pipeline->getTessVertexStageState(); if (cmdEncoder->getDevice()->_pMetalFeatures->nonUniformThreadgroups) { #if MVK_MACOS_OR_IOS @@ -294,7 +291,6 @@ MVKRenderSubpass* subpass = cmdEncoder->getSubpass(); uint32_t viewCount = subpass->isMultiview() ? subpass->getViewCountInMetalPass(cmdEncoder->getMultiviewPassIndex()) : 1; uint32_t instanceCount = _instanceCount * viewCount; - cmdEncoder->_graphicsResourcesState.offsetZeroDivisorVertexBuffers(stage, pipeline, _firstInstance); if (cmdEncoder->_pDeviceMetalFeatures->baseVertexInstanceDrawing) { [cmdEncoder->_mtlRenderEncoder drawPrimitives: cmdEncoder->_mtlPrimitiveType vertexStart: _firstVertex @@ -422,9 +418,6 @@ offset: idxBuffOffset atIndex: pipeline->getIndirectParamsIndex().stages[kMVKShaderStageVertex]]; [mtlTessCtlEncoder setStageInRegion: MTLRegionMake2D(_vertexOffset, _firstInstance, _indexCount, _instanceCount)]; - // If there are vertex bindings with a zero vertex divisor, I need to offset them by - // _firstInstance * stride, since that is the expected behaviour for a divisor of 0. - cmdEncoder->_graphicsResourcesState.offsetZeroDivisorVertexBuffers(stage, pipeline, _firstInstance); id vtxState = ibb.mtlIndexType == MTLIndexTypeUInt16 ? pipeline->getTessVertexStageIndex16State() : pipeline->getTessVertexStageIndex32State(); if (cmdEncoder->getDevice()->_pMetalFeatures->nonUniformThreadgroups) { #if MVK_MACOS_OR_IOS @@ -528,7 +521,6 @@ MVKRenderSubpass* subpass = cmdEncoder->getSubpass(); uint32_t viewCount = subpass->isMultiview() ? 
subpass->getViewCountInMetalPass(cmdEncoder->getMultiviewPassIndex()) : 1; uint32_t instanceCount = _instanceCount * viewCount; - cmdEncoder->_graphicsResourcesState.offsetZeroDivisorVertexBuffers(stage, pipeline, _firstInstance); if (cmdEncoder->_pDeviceMetalFeatures->baseVertexInstanceDrawing) { [cmdEncoder->_mtlRenderEncoder drawIndexedPrimitives: cmdEncoder->_mtlPrimitiveType indexCount: _indexCount @@ -1230,9 +1222,6 @@ indirectBufferOffset: mtlTempIndBuffOfst]; mtlTempIndBuffOfst += sizeof(MTLStageInRegionIndirectArguments); } - // If this is a synthetic command that originated in a direct call, and there are vertex bindings with a zero vertex - // divisor, I need to offset them by _firstInstance * stride, since that is the expected behaviour for a divisor of 0. - cmdEncoder->_graphicsResourcesState.offsetZeroDivisorVertexBuffers(stage, pipeline, _directCmdFirstInstance); [mtlTessCtlEncoder dispatchThreadgroupsWithIndirectBuffer: mtlIndBuff indirectBufferOffset: mtlTempIndBuffOfst threadsPerThreadgroup: MTLSizeMake(vtxThreadExecWidth, 1, 1)]; @@ -1311,7 +1300,6 @@ cmdEncoder->_graphicsResourcesState.beginMetalRenderPass(); cmdEncoder->getPushConstants(VK_SHADER_STAGE_VERTEX_BIT)->beginMetalRenderPass(); } else { - cmdEncoder->_graphicsResourcesState.offsetZeroDivisorVertexBuffers(stage, pipeline, _directCmdFirstInstance); [cmdEncoder->_mtlRenderEncoder drawIndexedPrimitives: cmdEncoder->_mtlPrimitiveType indexType: (MTLIndexType)ibb.mtlIndexType indexBuffer: ibb.mtlBuffer diff --git a/MoltenVK/MoltenVK/Commands/MVKCommandEncoderState.h b/MoltenVK/MoltenVK/Commands/MVKCommandEncoderState.h index 6dbeb6472..de28d65d8 100644 --- a/MoltenVK/MoltenVK/Commands/MVKCommandEncoderState.h +++ b/MoltenVK/MoltenVK/Commands/MVKCommandEncoderState.h @@ -556,9 +556,6 @@ class MVKGraphicsResourcesCommandEncoderState : public MVKResourcesCommandEncode MTLResourceUsage mtlUsage, MTLRenderStages mtlStages) override; - /** Offset all buffers for vertex attribute bindings with zero divisors by the given number of strides. */ - void offsetZeroDivisorVertexBuffers(MVKGraphicsStage stage, MVKGraphicsPipeline* pipeline, uint32_t firstInstance); - /** * Marks the buffer binding using the index as having been overridden, * such as by push constants or internal rendering in some transfers. 
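For context on the deletions in this patch: the removed offsetZeroDivisorVertexBuffers() calls compensated for Metal's lack of a zero instance divisor by shifting each affected buffer binding by firstInstance * stride. With vertex fetching now done in shader code, the loader can apply the expected indexing directly. A simplified model of that indexing, assuming the usual VK_EXT_vertex_attribute_divisor semantics (instanceWithinDraw counts from zero for the current draw; illustrative only, not part of the patch):

    #include <cstdint>

    // Simplified sketch: element index an instance-rate attribute is expected to read.
    // A divisor of 0 pins every instance to the element at firstInstance, which is what
    // the removed workaround emulated by offsetting the buffer by firstInstance * stride.
    static uint32_t instanceElementIndex(uint32_t firstInstance,
                                         uint32_t instanceWithinDraw,
                                         uint32_t divisor) {
        if (divisor == 0) { return firstInstance; }
        return firstInstance + (instanceWithinDraw / divisor);
    }
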
diff --git a/MoltenVK/MoltenVK/Commands/MVKCommandEncoderState.mm b/MoltenVK/MoltenVK/Commands/MVKCommandEncoderState.mm index 37f0194f9..288a901aa 100644 --- a/MoltenVK/MoltenVK/Commands/MVKCommandEncoderState.mm +++ b/MoltenVK/MoltenVK/Commands/MVKCommandEncoderState.mm @@ -733,30 +733,6 @@ encodeBinding(shaderStage.samplerStateBindings, shaderStage.areSamplerStateBindingsDirty, bindSampler); } -void MVKGraphicsResourcesCommandEncoderState::offsetZeroDivisorVertexBuffers(MVKGraphicsStage stage, - MVKGraphicsPipeline* pipeline, - uint32_t firstInstance) { - auto& shaderStage = _shaderStageResourceBindings[kMVKShaderStageVertex]; - for (auto& binding : pipeline->getZeroDivisorVertexBindings()) { - uint32_t mtlBuffIdx = pipeline->getMetalBufferIndexForVertexAttributeBinding(binding.first); - auto iter = std::find_if(shaderStage.bufferBindings.begin(), shaderStage.bufferBindings.end(), [mtlBuffIdx](const MVKMTLBufferBinding& b) { return b.index == mtlBuffIdx; }); - if (!iter) { continue; } - switch (stage) { - case kMVKGraphicsStageVertex: - [_cmdEncoder->getMTLComputeEncoder(kMVKCommandUseTessellationVertexTessCtl) setBufferOffset: iter->offset + firstInstance * binding.second - atIndex: mtlBuffIdx]; - break; - case kMVKGraphicsStageRasterization: - [_cmdEncoder->_mtlRenderEncoder setVertexBufferOffset: iter->offset + firstInstance * binding.second - atIndex: mtlBuffIdx]; - break; - default: - assert(false); // If we hit this, something went wrong. - break; - } - } -} - void MVKGraphicsResourcesCommandEncoderState::endMetalRenderPass() { MVKResourcesCommandEncoderState::endMetalRenderPass(); _renderUsageStages.clear(); @@ -831,16 +807,6 @@ offset: b.offset atIndex: b.index]; } - - // Add any translated vertex bindings for this binding - auto xltdVtxBindings = pipeline->getTranslatedVertexBindings(); - for (auto& xltdBind : xltdVtxBindings) { - if (b.index == pipeline->getMetalBufferIndexForVertexAttributeBinding(xltdBind.binding)) { - [cmdEncoder->_mtlRenderEncoder setVertexBuffer: b.mtlBuffer - offset: b.offset + xltdBind.translationOffset - atIndex: pipeline->getMetalBufferIndexForVertexAttributeBinding(xltdBind.translationBinding)]; - } - } } } else { b.isDirty = true; // We haven't written it out, so leave dirty until next time. diff --git a/MoltenVK/MoltenVK/GPUObjects/MVKPipeline.h b/MoltenVK/MoltenVK/GPUObjects/MVKPipeline.h index 5f03f5e68..1ac1b744d 100644 --- a/MoltenVK/MoltenVK/GPUObjects/MVKPipeline.h +++ b/MoltenVK/MoltenVK/GPUObjects/MVKPipeline.h @@ -292,12 +292,6 @@ class MVKGraphicsPipeline : public MVKPipeline { /** Returns the Metal vertex buffer index to use for the specified vertex attribute binding number. */ uint32_t getMetalBufferIndexForVertexAttributeBinding(uint32_t binding) { return _device->getMetalBufferIndexForVertexAttributeBinding(binding); } - /** Returns the collection of translated vertex bindings. */ - MVKArrayRef getTranslatedVertexBindings() { return _translatedVertexBindings.contents(); } - - /** Returns the collection of instance-rate vertex bindings whose divisor is zero, along with their strides. */ - MVKArrayRef getZeroDivisorVertexBindings() { return _zeroDivisorVertexBindings.contents(); } - /** Returns the MTLArgumentEncoder for the descriptor set. 
*/ MVKMTLArgumentEncoder& getMTLArgumentEncoder(uint32_t descSetIndex, MVKShaderStage stage) override { return _mtlArgumentEncoders[descSetIndex].stages[stage]; } @@ -345,7 +339,6 @@ class MVKGraphicsPipeline : public MVKPipeline { bool isRenderingPoints(const VkGraphicsPipelineCreateInfo* pCreateInfo); bool isRasterizationDisabled(const VkGraphicsPipelineCreateInfo* pCreateInfo); bool verifyImplicitBuffer(bool needsBuffer, MVKShaderImplicitRezBinding& index, MVKShaderStage stage, const char* name); - uint32_t getTranslatedVertexBinding(uint32_t binding, uint32_t translationOffset, uint32_t maxBinding); uint32_t getImplicitBufferIndex(MVKShaderStage stage, uint32_t bufferIndexOffset); MVKMTLFunction getMTLFunction(SPIRVToMSLConversionConfiguration& shaderConfig, const VkPipelineShaderStageCreateInfo* pShaderStage, @@ -362,8 +355,6 @@ class MVKGraphicsPipeline : public MVKPipeline { MVKSmallVector _scissors; MVKSmallVector _dynamicState; MVKSmallVector _customSamplePositions; - MVKSmallVector _translatedVertexBindings; - MVKSmallVector _zeroDivisorVertexBindings; MVKSmallVector _mtlArgumentEncoders; MVKSmallVector _descriptorBindingUse; MVKSmallVector _stagesUsingPhysicalStorageBufferAddressesCapability; diff --git a/MoltenVK/MoltenVK/GPUObjects/MVKPipeline.mm b/MoltenVK/MoltenVK/GPUObjects/MVKPipeline.mm index 9227c6ea6..1bad0a52c 100644 --- a/MoltenVK/MoltenVK/GPUObjects/MVKPipeline.mm +++ b/MoltenVK/MoltenVK/GPUObjects/MVKPipeline.mm @@ -882,33 +882,6 @@ static VkFormat mvkFormatFromOutput(const SPIRVShaderOutput& output) { return VK_FORMAT_UNDEFINED; } -// Returns a format of the same base type with vector length adjusted to fit size. -static MTLVertexFormat mvkAdjustFormatVectorToSize(MTLVertexFormat format, uint32_t size) { -#define MVK_ADJUST_FORMAT_CASE(size_1, type, suffix) \ - case MTLVertexFormat##type##4##suffix: if (size >= 4 * (size_1)) { return MTLVertexFormat##type##4##suffix; } \ - case MTLVertexFormat##type##3##suffix: if (size >= 3 * (size_1)) { return MTLVertexFormat##type##3##suffix; } \ - case MTLVertexFormat##type##2##suffix: if (size >= 2 * (size_1)) { return MTLVertexFormat##type##2##suffix; } \ - case MTLVertexFormat##type##suffix: if (size >= 1 * (size_1)) { return MTLVertexFormat##type##suffix; } \ - return MTLVertexFormatInvalid; - - switch (format) { - MVK_ADJUST_FORMAT_CASE(1, UChar, ) - MVK_ADJUST_FORMAT_CASE(1, Char, ) - MVK_ADJUST_FORMAT_CASE(1, UChar, Normalized) - MVK_ADJUST_FORMAT_CASE(1, Char, Normalized) - MVK_ADJUST_FORMAT_CASE(2, UShort, ) - MVK_ADJUST_FORMAT_CASE(2, Short, ) - MVK_ADJUST_FORMAT_CASE(2, UShort, Normalized) - MVK_ADJUST_FORMAT_CASE(2, Short, Normalized) - MVK_ADJUST_FORMAT_CASE(2, Half, ) - MVK_ADJUST_FORMAT_CASE(4, Float, ) - MVK_ADJUST_FORMAT_CASE(4, UInt, ) - MVK_ADJUST_FORMAT_CASE(4, Int, ) - default: return format; - } -#undef MVK_ADJUST_FORMAT_CASE -} - // Returns a retained MTLComputePipelineDescriptor for the tess. control stage of a tessellated draw constructed from this instance, or nil if an error occurs. // It is the responsibility of the caller to release the returned descriptor. 
MTLComputePipelineDescriptor* MVKGraphicsPipeline::newMTLTessControlStageDescriptor(const VkGraphicsPipelineCreateInfo* pCreateInfo, @@ -1405,15 +1378,6 @@ static MTLVertexFormat mvkAdjustFormatVectorToSize(MTLVertexFormat format, uint3 for (uint32_t i = 0; i < vbCnt; i++) { const VkVertexInputBindingDescription* pVKVB = &pVI->pVertexBindingDescriptions[i]; if (shaderConfig.isVertexBufferUsed(pVKVB->binding)) { - - // Vulkan allows any stride, but Metal only allows multiples of 4. - // TODO: We could try to expand the buffer to the required alignment in that case. - VkDeviceSize mtlVtxStrideAlignment = _device->_pMetalFeatures->vertexStrideAlignment; - if ((pVKVB->stride % mtlVtxStrideAlignment) != 0) { - setConfigurationResult(reportError(VK_ERROR_INITIALIZATION_FAILED, "Under Metal, vertex attribute binding strides must be aligned to %llu bytes.", mtlVtxStrideAlignment)); - return false; - } - maxBinding = max(pVKVB->binding, maxBinding); uint32_t vbIdx = getMetalBufferIndexForVertexAttributeBinding(pVKVB->binding); auto vbDesc = inputDesc.layouts[vbIdx]; @@ -1432,19 +1396,14 @@ static MTLVertexFormat mvkAdjustFormatVectorToSize(MTLVertexFormat format, uint3 } // Vertex buffer divisors (step rates) - std::unordered_set zeroDivisorBindings; if (pVertexInputDivisorState) { uint32_t vbdCnt = pVertexInputDivisorState->vertexBindingDivisorCount; for (uint32_t i = 0; i < vbdCnt; i++) { const VkVertexInputBindingDivisorDescriptionEXT* pVKVB = &pVertexInputDivisorState->pVertexBindingDivisors[i]; if (shaderConfig.isVertexBufferUsed(pVKVB->binding)) { uint32_t vbIdx = getMetalBufferIndexForVertexAttributeBinding(pVKVB->binding); - if ((NSUInteger)inputDesc.layouts[vbIdx].stepFunction == MTLStepFunctionPerInstance || - (NSUInteger)inputDesc.layouts[vbIdx].stepFunction == MTLStepFunctionThreadPositionInGridY) { - if (pVKVB->divisor == 0) { - inputDesc.layouts[vbIdx].stepFunction = (decltype(inputDesc.layouts[vbIdx].stepFunction))MTLStepFunctionConstant; - zeroDivisorBindings.insert(pVKVB->binding); - } + MTLStepFunction fn = static_cast(inputDesc.layouts[vbIdx].stepFunction); + if (fn == MTLStepFunctionPerInstance || fn == MTLStepFunctionThreadPositionInGridY) { inputDesc.layouts[vbIdx].stepRate = pVKVB->divisor; } } @@ -1459,9 +1418,6 @@ static MTLVertexFormat mvkAdjustFormatVectorToSize(MTLVertexFormat format, uint3 uint32_t vaBinding = pVKVA->binding; uint32_t vaOffset = pVKVA->offset; - // Vulkan allows offsets to exceed the buffer stride, but Metal doesn't. - // If this is the case, fetch a translated artificial buffer binding, using the same MTLBuffer, - // but that is translated so that the reduced VA offset fits into the binding stride. const VkVertexInputBindingDescription* pVKVB = pVI->pVertexBindingDescriptions; uint32_t attrSize = 0; for (uint32_t j = 0; j < vbCnt; j++, pVKVB++) { @@ -1474,76 +1430,18 @@ static MTLVertexFormat mvkAdjustFormatVectorToSize(MTLVertexFormat format, uint3 auto vbDesc = inputDesc.layouts[vbIdx]; uint32_t strideLowBound = vaOffset + attrSize; if (vbDesc.stride < strideLowBound) vbDesc.stride = strideLowBound; - } else if (vaOffset && vaOffset + attrSize > pVKVB->stride) { - // Move vertex attribute offset into the stride. This vertex attribute may be - // combined with other vertex attributes into the same translated buffer binding. 
- // But if the reduced offset combined with the vertex attribute size still won't
- // fit into the buffer binding stride, force the vertex attribute offset to zero,
- // effectively dedicating this vertex attribute to its own buffer binding.
- uint32_t origOffset = vaOffset;
- vaOffset %= pVKVB->stride;
- if (vaOffset + attrSize > pVKVB->stride) {
- vaOffset = 0;
- }
- vaBinding = getTranslatedVertexBinding(vaBinding, origOffset - vaOffset, maxBinding);
- if (zeroDivisorBindings.count(pVKVB->binding)) {
- zeroDivisorBindings.insert(vaBinding);
- }
}
break;
}
}
auto vaDesc = inputDesc.attributes[pVKVA->location];
- auto mtlFormat = (decltype(vaDesc.format))getPixelFormats()->getMTLVertexFormat(pVKVA->format);
- if (pVKVB->stride && attrSize > pVKVB->stride) {
- /* Metal does not support overlapping loads. Truncate format vector length to prevent an assertion
- * and hope it's not used by the shader. */
- MTLVertexFormat newFormat = mvkAdjustFormatVectorToSize((MTLVertexFormat)mtlFormat, pVKVB->stride);
- reportError(VK_SUCCESS, "Found attribute with size (%u) larger than it's binding's stride (%u). Changing descriptor format from %s to %s.",
- attrSize, pVKVB->stride, getPixelFormats()->getName((MTLVertexFormat)mtlFormat), getPixelFormats()->getName(newFormat));
- mtlFormat = (decltype(vaDesc.format))newFormat;
- }
- vaDesc.format = mtlFormat;
+ vaDesc.format = (decltype(vaDesc.format))getPixelFormats()->getMTLVertexFormat(pVKVA->format);
vaDesc.bufferIndex = (decltype(vaDesc.bufferIndex))getMetalBufferIndexForVertexAttributeBinding(vaBinding);
vaDesc.offset = vaOffset;
}
}
- // Run through the vertex bindings. Add a new Metal vertex layout for each translated binding,
- // identical to the original layout. The translated binding will index into the same MTLBuffer,
- // but at an offset that is one or more strides away from the original.
- for (uint32_t i = 0; i < vbCnt; i++) {
- const VkVertexInputBindingDescription* pVKVB = &pVI->pVertexBindingDescriptions[i];
- uint32_t vbVACnt = shaderConfig.countShaderInputsAt(pVKVB->binding);
- if (vbVACnt > 0) {
- uint32_t vbIdx = getMetalBufferIndexForVertexAttributeBinding(pVKVB->binding);
- auto vbDesc = inputDesc.layouts[vbIdx];
-
- uint32_t xldtVACnt = 0;
- for (auto& xltdBind : _translatedVertexBindings) {
- if (xltdBind.binding == pVKVB->binding) {
- uint32_t vbXltdIdx = getMetalBufferIndexForVertexAttributeBinding(xltdBind.translationBinding);
- auto vbXltdDesc = inputDesc.layouts[vbXltdIdx];
- vbXltdDesc.stride = vbDesc.stride;
- vbXltdDesc.stepFunction = vbDesc.stepFunction;
- vbXltdDesc.stepRate = vbDesc.stepRate;
- xldtVACnt++;
- }
- }
-
- // If all of the vertex attributes at this vertex buffer binding have been translated, remove it.
- if (xldtVACnt == vbVACnt) { vbDesc.stride = 0; }
- }
- }
-
- // Collect all bindings with zero divisors. We need to remember them so we can offset
- // the vertex buffers during a draw.
- for (uint32_t binding : zeroDivisorBindings) { - uint32_t stride = (uint32_t)inputDesc.layouts[getMetalBufferIndexForVertexAttributeBinding(binding)].stride; - _zeroDivisorVertexBindings.emplace_back(binding, stride); - } - return true; } @@ -1555,32 +1453,10 @@ static MTLVertexFormat mvkAdjustFormatVectorToSize(MTLVertexFormat format, uint3 uint32_t vbIdx = getMetalBufferIndexForVertexAttributeBinding(pVKVB->binding); if (inputDesc.layouts[vbIdx].stepFunction == MTLVertexStepFunctionPerInstance) { inputDesc.layouts[vbIdx].stepRate = inputDesc.layouts[vbIdx].stepRate / oldViewCount * viewCount; - for (auto& xltdBind : _translatedVertexBindings) { - if (xltdBind.binding == pVKVB->binding) { - uint32_t vbXltdIdx = getMetalBufferIndexForVertexAttributeBinding(xltdBind.translationBinding); - inputDesc.layouts[vbXltdIdx].stepRate = inputDesc.layouts[vbXltdIdx].stepRate / oldViewCount * viewCount; - } - } } } } -// Returns a translated binding for the existing binding and translation offset, creating it if needed. -uint32_t MVKGraphicsPipeline::getTranslatedVertexBinding(uint32_t binding, uint32_t translationOffset, uint32_t maxBinding) { - // See if a translated binding already exists (for example if more than one VA needs the same translation). - for (auto& xltdBind : _translatedVertexBindings) { - if (xltdBind.binding == binding && xltdBind.translationOffset == translationOffset) { - return xltdBind.translationBinding; - } - } - - // Get next available binding point and add a translation binding description for it - uint16_t xltdBindPt = (uint16_t)(maxBinding + _translatedVertexBindings.size() + 1); - _translatedVertexBindings.push_back( {.binding = (uint16_t)binding, .translationBinding = xltdBindPt, .translationOffset = translationOffset} ); - - return xltdBindPt; -} - void MVKGraphicsPipeline::addTessellationToPipeline(MTLRenderPipelineDescriptor* plDesc, const SPIRVTessReflectionData& reflectData, const VkPipelineTessellationStateCreateInfo* pTS) { From 5b24d3e9ab72740ea79a52161399e1bd51401ecb Mon Sep 17 00:00:00 2001 From: Evan Tang Date: Mon, 11 Sep 2023 11:19:00 -0500 Subject: [PATCH 4/7] Formatting --- MoltenVK/MoltenVK/GPUObjects/MVKPipeline.mm | 64 ++++++++++----------- 1 file changed, 32 insertions(+), 32 deletions(-) diff --git a/MoltenVK/MoltenVK/GPUObjects/MVKPipeline.mm b/MoltenVK/MoltenVK/GPUObjects/MVKPipeline.mm index 1bad0a52c..f9d1831ae 100644 --- a/MoltenVK/MoltenVK/GPUObjects/MVKPipeline.mm +++ b/MoltenVK/MoltenVK/GPUObjects/MVKPipeline.mm @@ -1358,26 +1358,26 @@ static VkFormat mvkFormatFromOutput(const SPIRVShaderOutput& output) { template bool MVKGraphicsPipeline::addVertexInputToPipeline(T* inputDesc, - const VkPipelineVertexInputStateCreateInfo* pVI, - const SPIRVToMSLConversionConfiguration& shaderConfig) { - // Collect extension structures - VkPipelineVertexInputDivisorStateCreateInfoEXT* pVertexInputDivisorState = nullptr; + const VkPipelineVertexInputStateCreateInfo* pVI, + const SPIRVToMSLConversionConfiguration& shaderConfig) { + // Collect extension structures + VkPipelineVertexInputDivisorStateCreateInfoEXT* pVertexInputDivisorState = nullptr; for (const auto* next = (VkBaseInStructure*)pVI->pNext; next; next = next->pNext) { - switch (next->sType) { - case VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_DIVISOR_STATE_CREATE_INFO_EXT: - pVertexInputDivisorState = (VkPipelineVertexInputDivisorStateCreateInfoEXT*)next; - break; - default: - break; - } - } + switch (next->sType) { + case VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_DIVISOR_STATE_CREATE_INFO_EXT: + 
pVertexInputDivisorState = (VkPipelineVertexInputDivisorStateCreateInfoEXT*)next;
+ break;
+ default:
+ break;
+ }
+ }
- // Vertex buffer bindings
+ // Vertex buffer bindings
uint32_t vbCnt = pVI->vertexBindingDescriptionCount;
uint32_t maxBinding = 0;
- for (uint32_t i = 0; i < vbCnt; i++) {
- const VkVertexInputBindingDescription* pVKVB = &pVI->pVertexBindingDescriptions[i];
- if (shaderConfig.isVertexBufferUsed(pVKVB->binding)) {
+ for (uint32_t i = 0; i < vbCnt; i++) {
+ const VkVertexInputBindingDescription* pVKVB = &pVI->pVertexBindingDescriptions[i];
+ if (shaderConfig.isVertexBufferUsed(pVKVB->binding)) {
maxBinding = max(pVKVB->binding, maxBinding);
uint32_t vbIdx = getMetalBufferIndexForVertexAttributeBinding(pVKVB->binding);
auto vbDesc = inputDesc.layouts[vbIdx];
@@ -1392,23 +1392,23 @@ static VkFormat mvkFormatFromOutput(const SPIRVShaderOutput& output) {
vbDesc.stepFunction = (decltype(vbDesc.stepFunction))mvkMTLStepFunctionFromVkVertexInputRate(pVKVB->inputRate, isTessellationPipeline());
vbDesc.stepRate = 1;
}
- }
- }
+ }
+ }
- // Vertex buffer divisors (step rates)
- if (pVertexInputDivisorState) {
- uint32_t vbdCnt = pVertexInputDivisorState->vertexBindingDivisorCount;
- for (uint32_t i = 0; i < vbdCnt; i++) {
- const VkVertexInputBindingDivisorDescriptionEXT* pVKVB = &pVertexInputDivisorState->pVertexBindingDivisors[i];
- if (shaderConfig.isVertexBufferUsed(pVKVB->binding)) {
- uint32_t vbIdx = getMetalBufferIndexForVertexAttributeBinding(pVKVB->binding);
- MTLStepFunction fn = static_cast<MTLStepFunction>(inputDesc.layouts[vbIdx].stepFunction);
- if (fn == MTLStepFunctionPerInstance || fn == MTLStepFunctionThreadPositionInGridY) {
- inputDesc.layouts[vbIdx].stepRate = pVKVB->divisor;
- }
- }
- }
- }
+ // Vertex buffer divisors (step rates)
+ if (pVertexInputDivisorState) {
+ uint32_t vbdCnt = pVertexInputDivisorState->vertexBindingDivisorCount;
+ for (uint32_t i = 0; i < vbdCnt; i++) {
+ const VkVertexInputBindingDivisorDescriptionEXT* pVKVB = &pVertexInputDivisorState->pVertexBindingDivisors[i];
+ if (shaderConfig.isVertexBufferUsed(pVKVB->binding)) {
+ uint32_t vbIdx = getMetalBufferIndexForVertexAttributeBinding(pVKVB->binding);
+ MTLStepFunction fn = static_cast<MTLStepFunction>(inputDesc.layouts[vbIdx].stepFunction);
+ if (fn == MTLStepFunctionPerInstance || fn == MTLStepFunctionThreadPositionInGridY) {
+ inputDesc.layouts[vbIdx].stepRate = pVKVB->divisor;
+ }
+ }
+ }
+ }
// Vertex attributes
uint32_t vaCnt = pVI->vertexAttributeDescriptionCount;

From d5162811a4de3863c98aee3c013db021f44ba6ff Mon Sep 17 00:00:00 2001
From: Evan Tang
Date: Tue, 12 Sep 2023 10:22:34 -0500
Subject: [PATCH 5/7] Remove vertex stride alignment restrictions

---
 MoltenVK/MoltenVK/GPUObjects/MVKDevice.mm | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/MoltenVK/MoltenVK/GPUObjects/MVKDevice.mm b/MoltenVK/MoltenVK/GPUObjects/MVKDevice.mm
index 3a54d6afd..73848f587 100644
--- a/MoltenVK/MoltenVK/GPUObjects/MVKDevice.mm
+++ b/MoltenVK/MoltenVK/GPUObjects/MVKDevice.mm
@@ -717,7 +717,7 @@
}
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PORTABILITY_SUBSET_PROPERTIES_KHR: {
auto* portabilityProps = (VkPhysicalDevicePortabilitySubsetPropertiesKHR*)next;
- portabilityProps->minVertexInputBindingStrideAlignment = (uint32_t)_metalFeatures.vertexStrideAlignment;
+ portabilityProps->minVertexInputBindingStrideAlignment = 1; // Shader vertex loader has no restrictions
break;
}
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_MEMORY_HOST_PROPERTIES_EXT: {

From e66e196af42fc31cb06aff684becf88f5e862b75 Mon Sep 17 00:00:00 2001
From: 
Evan Tang Date: Tue, 12 Sep 2023 14:30:58 -0500 Subject: [PATCH 6/7] Support vertex formats from shader vertex loading --- MoltenVK/MoltenVK/GPUObjects/MVKDevice.h | 3 +++ MoltenVK/MoltenVK/GPUObjects/MVKDevice.mm | 1 + .../MoltenVK/GPUObjects/MVKPixelFormats.h | 1 + .../MoltenVK/GPUObjects/MVKPixelFormats.mm | 22 ++++++++++++++++++- 4 files changed, 26 insertions(+), 1 deletion(-) diff --git a/MoltenVK/MoltenVK/GPUObjects/MVKDevice.h b/MoltenVK/MoltenVK/GPUObjects/MVKDevice.h index 125bf9aa9..7925f8a85 100644 --- a/MoltenVK/MoltenVK/GPUObjects/MVKDevice.h +++ b/MoltenVK/MoltenVK/GPUObjects/MVKDevice.h @@ -131,6 +131,9 @@ class MVKPhysicalDevice : public MVKDispatchableVulkanAPIObject { /** Populates the specified structure with the features of this device. */ void getFeatures(VkPhysicalDeviceFeatures2* features); + /** Returns a structure with the features of this device */ + const VkPhysicalDeviceFeatures& getFeatures() const { return _features; } + /** Populates the specified structure with the properties of this device. */ void getProperties(VkPhysicalDeviceProperties* properties); diff --git a/MoltenVK/MoltenVK/GPUObjects/MVKDevice.mm b/MoltenVK/MoltenVK/GPUObjects/MVKDevice.mm index 73848f587..181251ed1 100644 --- a/MoltenVK/MoltenVK/GPUObjects/MVKDevice.mm +++ b/MoltenVK/MoltenVK/GPUObjects/MVKDevice.mm @@ -1642,6 +1642,7 @@ initMetalFeatures(); // Call second. initFeatures(); // Call third. initLimits(); // Call fourth. + _pixelFormats.modifyCapabilitiesFromPhysicalDevice(); initExtensions(); initMemoryProperties(); initExternalMemoryProperties(); diff --git a/MoltenVK/MoltenVK/GPUObjects/MVKPixelFormats.h b/MoltenVK/MoltenVK/GPUObjects/MVKPixelFormats.h index 479965b48..3f335def2 100644 --- a/MoltenVK/MoltenVK/GPUObjects/MVKPixelFormats.h +++ b/MoltenVK/MoltenVK/GPUObjects/MVKPixelFormats.h @@ -409,6 +409,7 @@ class MVKPixelFormats : public MVKBaseObject { #pragma mark Construction MVKPixelFormats(MVKPhysicalDevice* physicalDevice = nullptr); + void modifyCapabilitiesFromPhysicalDevice(); protected: MVKVkFormatDesc& getVkFormatDesc(VkFormat vkFormat); diff --git a/MoltenVK/MoltenVK/GPUObjects/MVKPixelFormats.mm b/MoltenVK/MoltenVK/GPUObjects/MVKPixelFormats.mm index 4fd26e8ed..af7d08742 100644 --- a/MoltenVK/MoltenVK/GPUObjects/MVKPixelFormats.mm +++ b/MoltenVK/MoltenVK/GPUObjects/MVKPixelFormats.mm @@ -2063,6 +2063,21 @@ kMVKVkFormatFeatureFlagsBufVertex = (VK_FORMAT_FEATURE_VERTEX_BUFFER_BIT), } MVKVkFormatFeatureFlags; +void MVKPixelFormats::modifyCapabilitiesFromPhysicalDevice() { + bool int64 = _physicalDevice->getFeatures().shaderInt64; + bool float64 = _physicalDevice->getFeatures().shaderFloat64; + for (MVKVkFormatDesc& desc : _vkFormatDescriptions) { + using namespace SPIRV_CROSS_NAMESPACE; + MSLFormatInfo info = CompilerMSL::get_format_info(static_cast(desc.vkFormat)); + if (info.log2_align > 2) { + bool supported = info.is_float ? float64 : int64; + if (!supported) { + mvkDisableFlags(desc.properties.bufferFeatures, kMVKVkFormatFeatureFlagsBufVertex); + } + } + } +} + // Sets the VkFormatProperties (optimal/linear/buffer) for the Vulkan format. 
void MVKPixelFormats::setFormatProperties(MVKVkFormatDesc& vkDesc) { @@ -2150,6 +2165,11 @@ enableFormatFeatures(Read, Buf, mtlPixFmtCaps, vkProps.bufferFeatures); enableFormatFeatures(Write, Buf, mtlPixFmtCaps, vkProps.bufferFeatures); enableFormatFeatures(Atomic, Buf, mtlPixFmtCaps, vkProps.bufferFeatures); - enableFormatFeatures(Vertex, Buf, getMTLVertexFormatDesc(vkDesc.mtlVertexFormat).mtlFmtCaps, vkProps.bufferFeatures); + using namespace SPIRV_CROSS_NAMESPACE; + MSLFormatInfo info = CompilerMSL::get_format_info(static_cast(vkDesc.vkFormat)); + bool vertex_supported = info.is_supported(); + if (vertex_supported) { + mvkEnableFlags(vkProps.bufferFeatures, kMVKVkFormatFeatureFlagsBufVertex); + } } } From 02321f2201190d6e049884b7b477a5643181c504 Mon Sep 17 00:00:00 2001 From: Evan Tang Date: Wed, 13 Sep 2023 11:26:41 -0500 Subject: [PATCH 7/7] Remove substitute vertex formats No longer needed --- .../MoltenVK/GPUObjects/MVKPixelFormats.h | 6 +- .../MoltenVK/GPUObjects/MVKPixelFormats.mm | 522 +++++++++--------- 2 files changed, 261 insertions(+), 267 deletions(-) diff --git a/MoltenVK/MoltenVK/GPUObjects/MVKPixelFormats.h b/MoltenVK/MoltenVK/GPUObjects/MVKPixelFormats.h index 3f335def2..ca0615595 100644 --- a/MoltenVK/MoltenVK/GPUObjects/MVKPixelFormats.h +++ b/MoltenVK/MoltenVK/GPUObjects/MVKPixelFormats.h @@ -141,9 +141,8 @@ typedef struct MVKVkFormatDesc { MTLPixelFormat mtlPixelFormat; MTLPixelFormat mtlPixelFormatSubstitute; MTLVertexFormat mtlVertexFormat; - MTLVertexFormat mtlVertexFormatSubstitute; - uint8_t chromaSubsamplingPlaneCount; - uint8_t chromaSubsamplingComponentBits; + uint8_t chromaSubsamplingPlaneCount; + uint8_t chromaSubsamplingComponentBits; VkExtent2D blockTexelSize; uint32_t bytesPerBlock; MVKFormatType formatType; @@ -158,7 +157,6 @@ typedef struct MVKVkFormatDesc { inline bool isSupportedOrSubstitutable() const { return isSupported() || (mtlPixelFormatSubstitute != MTLPixelFormatInvalid); }; inline bool vertexIsSupported() const { return (mtlVertexFormat != MTLVertexFormatInvalid); }; - inline bool vertexIsSupportedOrSubstitutable() const { return vertexIsSupported() || (mtlVertexFormatSubstitute != MTLVertexFormatInvalid); }; bool needsSwizzle() const { return componentMapping.r != VK_COMPONENT_SWIZZLE_IDENTITY || diff --git a/MoltenVK/MoltenVK/GPUObjects/MVKPixelFormats.mm b/MoltenVK/MoltenVK/GPUObjects/MVKPixelFormats.mm index af7d08742..016a9e296 100644 --- a/MoltenVK/MoltenVK/GPUObjects/MVKPixelFormats.mm +++ b/MoltenVK/MoltenVK/GPUObjects/MVKPixelFormats.mm @@ -780,21 +780,21 @@ buildVkFormatMaps(); } -#define addVkFormatDescFull(VK_FMT, MTL_FMT, MTL_FMT_ALT, MTL_VTX_FMT, MTL_VTX_FMT_ALT, CSPC, CSCB, BLK_W, BLK_H, BLK_BYTE_CNT, MVK_FMT_TYPE, SWIZ_R, SWIZ_G, SWIZ_B, SWIZ_A) \ +#define addVkFormatDescFull(VK_FMT, MTL_FMT, MTL_FMT_ALT, MTL_VTX_FMT, CSPC, CSCB, BLK_W, BLK_H, BLK_BYTE_CNT, MVK_FMT_TYPE, SWIZ_R, SWIZ_G, SWIZ_B, SWIZ_A) \ MVKAssert(fmtIdx < _vkFormatCount, "Attempting to describe %d VkFormats, but only have space for %d. 
Increase the value of _vkFormatCount", fmtIdx + 1, _vkFormatCount); \ - _vkFormatDescriptions[fmtIdx++] = { VK_FORMAT_ ##VK_FMT, MTLPixelFormat ##MTL_FMT, MTLPixelFormat ##MTL_FMT_ALT, MTLVertexFormat ##MTL_VTX_FMT, MTLVertexFormat ##MTL_VTX_FMT_ALT, \ - CSPC, CSCB, { BLK_W, BLK_H }, BLK_BYTE_CNT, kMVKFormat ##MVK_FMT_TYPE, { 0, 0, 0 }, \ - { VK_COMPONENT_SWIZZLE_ ##SWIZ_R, VK_COMPONENT_SWIZZLE_ ##SWIZ_G, VK_COMPONENT_SWIZZLE_ ##SWIZ_B, VK_COMPONENT_SWIZZLE_ ##SWIZ_A }, \ - "VK_FORMAT_" #VK_FMT, false } + _vkFormatDescriptions[fmtIdx++] = { VK_FORMAT_ ##VK_FMT, MTLPixelFormat ##MTL_FMT, MTLPixelFormat ##MTL_FMT_ALT, MTLVertexFormat ##MTL_VTX_FMT, \ + CSPC, CSCB, { BLK_W, BLK_H }, BLK_BYTE_CNT, kMVKFormat ##MVK_FMT_TYPE, { 0, 0, 0 }, \ + { VK_COMPONENT_SWIZZLE_ ##SWIZ_R, VK_COMPONENT_SWIZZLE_ ##SWIZ_G, VK_COMPONENT_SWIZZLE_ ##SWIZ_B, VK_COMPONENT_SWIZZLE_ ##SWIZ_A }, \ + "VK_FORMAT_" #VK_FMT, false } -#define addVkFormatDesc(VK_FMT, MTL_FMT, MTL_FMT_ALT, MTL_VTX_FMT, MTL_VTX_FMT_ALT, BLK_W, BLK_H, BLK_BYTE_CNT, MVK_FMT_TYPE) \ - addVkFormatDescFull(VK_FMT, MTL_FMT, MTL_FMT_ALT, MTL_VTX_FMT, MTL_VTX_FMT_ALT, 0, 0, BLK_W, BLK_H, BLK_BYTE_CNT, MVK_FMT_TYPE, IDENTITY, IDENTITY, IDENTITY, IDENTITY) +#define addVkFormatDesc(VK_FMT, MTL_FMT, MTL_FMT_ALT, MTL_VTX_FMT, BLK_W, BLK_H, BLK_BYTE_CNT, MVK_FMT_TYPE) \ + addVkFormatDescFull(VK_FMT, MTL_FMT, MTL_FMT_ALT, MTL_VTX_FMT, 0, 0, BLK_W, BLK_H, BLK_BYTE_CNT, MVK_FMT_TYPE, IDENTITY, IDENTITY, IDENTITY, IDENTITY) -#define addVkFormatDescSwizzled(VK_FMT, MTL_FMT, MTL_FMT_ALT, MTL_VTX_FMT, MTL_VTX_FMT_ALT, BLK_W, BLK_H, BLK_BYTE_CNT, MVK_FMT_TYPE, SWIZ_R, SWIZ_G, SWIZ_B, SWIZ_A) \ - addVkFormatDescFull(VK_FMT, MTL_FMT, MTL_FMT_ALT, MTL_VTX_FMT, MTL_VTX_FMT_ALT, 0, 0, BLK_W, BLK_H, BLK_BYTE_CNT, MVK_FMT_TYPE, SWIZ_R, SWIZ_G, SWIZ_B, SWIZ_A) +#define addVkFormatDescSwizzled(VK_FMT, MTL_FMT, MTL_FMT_ALT, MTL_VTX_FMT, BLK_W, BLK_H, BLK_BYTE_CNT, MVK_FMT_TYPE, SWIZ_R, SWIZ_G, SWIZ_B, SWIZ_A) \ + addVkFormatDescFull(VK_FMT, MTL_FMT, MTL_FMT_ALT, MTL_VTX_FMT, 0, 0, BLK_W, BLK_H, BLK_BYTE_CNT, MVK_FMT_TYPE, SWIZ_R, SWIZ_G, SWIZ_B, SWIZ_A) #define addVkFormatDescChromaSubsampling(VK_FMT, MTL_FMT, CSPC, CSCB, BLK_W, BLK_H, BLK_BYTE_CNT) \ - addVkFormatDescFull(VK_FMT, MTL_FMT, Invalid, Invalid, Invalid, CSPC, CSCB, BLK_W, BLK_H, BLK_BYTE_CNT, ColorFloat, IDENTITY, IDENTITY, IDENTITY, IDENTITY) + addVkFormatDescFull(VK_FMT, MTL_FMT, Invalid, Invalid, CSPC, CSCB, BLK_W, BLK_H, BLK_BYTE_CNT, ColorFloat, IDENTITY, IDENTITY, IDENTITY, IDENTITY) void MVKPixelFormats::initVkFormatCapabilities() { @@ -805,257 +805,257 @@ // When adding to this list, be sure to ensure _vkFormatCount is large enough for the format count // UNDEFINED must come first. 
- addVkFormatDesc( UNDEFINED, Invalid, Invalid, Invalid, Invalid, 1, 1, 0, None ); - - addVkFormatDesc( R4G4_UNORM_PACK8, Invalid, Invalid, Invalid, Invalid, 1, 1, 1, ColorFloat ); - addVkFormatDesc( R4G4B4A4_UNORM_PACK16, ABGR4Unorm, Invalid, Invalid, Invalid, 1, 1, 2, ColorFloat ); - addVkFormatDesc( B4G4R4A4_UNORM_PACK16, Invalid, Invalid, Invalid, Invalid, 1, 1, 2, ColorFloat ); - addVkFormatDescSwizzled( A4R4G4B4_UNORM_PACK16, ABGR4Unorm, Invalid, Invalid, Invalid, 1, 1, 2, ColorFloat, G, B, A, R ); - addVkFormatDescSwizzled( A4B4G4R4_UNORM_PACK16, ABGR4Unorm, Invalid, Invalid, Invalid, 1, 1, 2, ColorFloat, A, B, G, R ); - - addVkFormatDesc( R5G6B5_UNORM_PACK16, B5G6R5Unorm, Invalid, Invalid, Invalid, 1, 1, 2, ColorFloat ); - addVkFormatDesc( B5G6R5_UNORM_PACK16, Invalid, Invalid, Invalid, Invalid, 1, 1, 2, ColorFloat ); - addVkFormatDesc( R5G5B5A1_UNORM_PACK16, A1BGR5Unorm, Invalid, Invalid, Invalid, 1, 1, 2, ColorFloat ); - addVkFormatDesc( B5G5R5A1_UNORM_PACK16, Invalid, Invalid, Invalid, Invalid, 1, 1, 2, ColorFloat ); - addVkFormatDesc( A1R5G5B5_UNORM_PACK16, BGR5A1Unorm, Invalid, Invalid, Invalid, 1, 1, 2, ColorFloat ); - - addVkFormatDesc( R8_UNORM, R8Unorm, Invalid, UCharNormalized, UChar2Normalized, 1, 1, 1, ColorFloat ); - addVkFormatDesc( R8_SNORM, R8Snorm, Invalid, CharNormalized, Char2Normalized, 1, 1, 1, ColorFloat ); - addVkFormatDesc( R8_USCALED, Invalid, Invalid, UChar, UChar2, 1, 1, 1, ColorFloat ); - addVkFormatDesc( R8_SSCALED, Invalid, Invalid, Char, Char2, 1, 1, 1, ColorFloat ); - addVkFormatDesc( R8_UINT, R8Uint, Invalid, UChar, UChar2, 1, 1, 1, ColorUInt8 ); - addVkFormatDesc( R8_SINT, R8Sint, Invalid, Char, Char2, 1, 1, 1, ColorInt8 ); - addVkFormatDesc( R8_SRGB, R8Unorm_sRGB, Invalid, UCharNormalized, UChar2Normalized, 1, 1, 1, ColorFloat ); - - addVkFormatDesc( R8G8_UNORM, RG8Unorm, Invalid, UChar2Normalized, Invalid, 1, 1, 2, ColorFloat ); - addVkFormatDesc( R8G8_SNORM, RG8Snorm, Invalid, Char2Normalized, Invalid, 1, 1, 2, ColorFloat ); - addVkFormatDesc( R8G8_USCALED, Invalid, Invalid, UChar2, Invalid, 1, 1, 2, ColorFloat ); - addVkFormatDesc( R8G8_SSCALED, Invalid, Invalid, Char2, Invalid, 1, 1, 2, ColorFloat ); - addVkFormatDesc( R8G8_UINT, RG8Uint, Invalid, UChar2, Invalid, 1, 1, 2, ColorUInt8 ); - addVkFormatDesc( R8G8_SINT, RG8Sint, Invalid, Char2, Invalid, 1, 1, 2, ColorInt8 ); - addVkFormatDesc( R8G8_SRGB, RG8Unorm_sRGB, Invalid, UChar2Normalized, Invalid, 1, 1, 2, ColorFloat ); - - addVkFormatDesc( R8G8B8_UNORM, Invalid, Invalid, UChar3Normalized, Invalid, 1, 1, 3, ColorFloat ); - addVkFormatDesc( R8G8B8_SNORM, Invalid, Invalid, Char3Normalized, Invalid, 1, 1, 3, ColorFloat ); - addVkFormatDesc( R8G8B8_USCALED, Invalid, Invalid, UChar3, Invalid, 1, 1, 3, ColorFloat ); - addVkFormatDesc( R8G8B8_SSCALED, Invalid, Invalid, Char3, Invalid, 1, 1, 3, ColorFloat ); - addVkFormatDesc( R8G8B8_UINT, Invalid, Invalid, UChar3, Invalid, 1, 1, 3, ColorUInt8 ); - addVkFormatDesc( R8G8B8_SINT, Invalid, Invalid, Char3, Invalid, 1, 1, 3, ColorInt8 ); - addVkFormatDesc( R8G8B8_SRGB, Invalid, Invalid, UChar3Normalized, Invalid, 1, 1, 3, ColorFloat ); - - addVkFormatDesc( B8G8R8_UNORM, Invalid, Invalid, Invalid, Invalid, 1, 1, 3, ColorFloat ); - addVkFormatDesc( B8G8R8_SNORM, Invalid, Invalid, Invalid, Invalid, 1, 1, 3, ColorFloat ); - addVkFormatDesc( B8G8R8_USCALED, Invalid, Invalid, Invalid, Invalid, 1, 1, 3, ColorFloat ); - addVkFormatDesc( B8G8R8_SSCALED, Invalid, Invalid, Invalid, Invalid, 1, 1, 3, ColorFloat ); - addVkFormatDesc( B8G8R8_UINT, Invalid, Invalid, 
Invalid, Invalid, 1, 1, 3, ColorUInt8 ); - addVkFormatDesc( B8G8R8_SINT, Invalid, Invalid, Invalid, Invalid, 1, 1, 3, ColorInt8 ); - addVkFormatDesc( B8G8R8_SRGB, Invalid, Invalid, Invalid, Invalid, 1, 1, 3, ColorFloat ); - - addVkFormatDesc( R8G8B8A8_UNORM, RGBA8Unorm, Invalid, UChar4Normalized, Invalid, 1, 1, 4, ColorFloat ); - addVkFormatDesc( R8G8B8A8_SNORM, RGBA8Snorm, Invalid, Char4Normalized, Invalid, 1, 1, 4, ColorFloat ); - addVkFormatDesc( R8G8B8A8_USCALED, Invalid, Invalid, UChar4, Invalid, 1, 1, 4, ColorFloat ); - addVkFormatDesc( R8G8B8A8_SSCALED, Invalid, Invalid, Char4, Invalid, 1, 1, 4, ColorFloat ); - addVkFormatDesc( R8G8B8A8_UINT, RGBA8Uint, Invalid, UChar4, Invalid, 1, 1, 4, ColorUInt8 ); - addVkFormatDesc( R8G8B8A8_SINT, RGBA8Sint, Invalid, Char4, Invalid, 1, 1, 4, ColorInt8 ); - addVkFormatDesc( R8G8B8A8_SRGB, RGBA8Unorm_sRGB, Invalid, UChar4Normalized, Invalid, 1, 1, 4, ColorFloat ); - - addVkFormatDesc( B8G8R8A8_UNORM, BGRA8Unorm, Invalid, UChar4Normalized_BGRA, Invalid, 1, 1, 4, ColorFloat ); - addVkFormatDesc( B8G8R8A8_SNORM, Invalid, Invalid, Invalid, Invalid, 1, 1, 4, ColorFloat ); - addVkFormatDesc( B8G8R8A8_USCALED, Invalid, Invalid, Invalid, Invalid, 1, 1, 4, ColorFloat ); - addVkFormatDesc( B8G8R8A8_SSCALED, Invalid, Invalid, Invalid, Invalid, 1, 1, 4, ColorFloat ); - addVkFormatDesc( B8G8R8A8_UINT, Invalid, Invalid, Invalid, Invalid, 1, 1, 4, ColorUInt8 ); - addVkFormatDesc( B8G8R8A8_SINT, Invalid, Invalid, Invalid, Invalid, 1, 1, 4, ColorInt8 ); - addVkFormatDesc( B8G8R8A8_SRGB, BGRA8Unorm_sRGB, Invalid, Invalid, Invalid, 1, 1, 4, ColorFloat ); - - addVkFormatDesc( A8B8G8R8_UNORM_PACK32, RGBA8Unorm, Invalid, UChar4Normalized, Invalid, 1, 1, 4, ColorFloat ); - addVkFormatDesc( A8B8G8R8_SNORM_PACK32, RGBA8Snorm, Invalid, Char4Normalized, Invalid, 1, 1, 4, ColorFloat ); - addVkFormatDesc( A8B8G8R8_USCALED_PACK32, Invalid, Invalid, UChar4, Invalid, 1, 1, 4, ColorFloat ); - addVkFormatDesc( A8B8G8R8_SSCALED_PACK32, Invalid, Invalid, Char4, Invalid, 1, 1, 4, ColorFloat ); - addVkFormatDesc( A8B8G8R8_UINT_PACK32, RGBA8Uint, Invalid, UChar4, Invalid, 1, 1, 4, ColorUInt8 ); - addVkFormatDesc( A8B8G8R8_SINT_PACK32, RGBA8Sint, Invalid, Char4, Invalid, 1, 1, 4, ColorInt8 ); - addVkFormatDesc( A8B8G8R8_SRGB_PACK32, RGBA8Unorm_sRGB, Invalid, UChar4Normalized, Invalid, 1, 1, 4, ColorFloat ); - - addVkFormatDesc( A2R10G10B10_UNORM_PACK32, BGR10A2Unorm, Invalid, Invalid, Invalid, 1, 1, 4, ColorFloat ); - addVkFormatDesc( A2R10G10B10_SNORM_PACK32, Invalid, Invalid, Invalid, Invalid, 1, 1, 4, ColorFloat ); - addVkFormatDesc( A2R10G10B10_USCALED_PACK32, Invalid, Invalid, Invalid, Invalid, 1, 1, 4, ColorFloat ); - addVkFormatDesc( A2R10G10B10_SSCALED_PACK32, Invalid, Invalid, Invalid, Invalid, 1, 1, 4, ColorFloat ); - addVkFormatDesc( A2R10G10B10_UINT_PACK32, Invalid, Invalid, Invalid, Invalid, 1, 1, 4, ColorUInt16 ); - addVkFormatDesc( A2R10G10B10_SINT_PACK32, Invalid, Invalid, Invalid, Invalid, 1, 1, 4, ColorInt16 ); - - addVkFormatDesc( A2B10G10R10_UNORM_PACK32, RGB10A2Unorm, Invalid, UInt1010102Normalized, Invalid, 1, 1, 4, ColorFloat ); - addVkFormatDesc( A2B10G10R10_SNORM_PACK32, Invalid, Invalid, Int1010102Normalized, Invalid, 1, 1, 4, ColorFloat ); - addVkFormatDesc( A2B10G10R10_USCALED_PACK32, Invalid, Invalid, Invalid, Invalid, 1, 1, 4, ColorFloat ); - addVkFormatDesc( A2B10G10R10_SSCALED_PACK32, Invalid, Invalid, Invalid, Invalid, 1, 1, 4, ColorFloat ); - addVkFormatDesc( A2B10G10R10_UINT_PACK32, RGB10A2Uint, Invalid, Invalid, Invalid, 1, 1, 4, ColorUInt16 ); - 
addVkFormatDesc( A2B10G10R10_SINT_PACK32, Invalid, Invalid, Invalid, Invalid, 1, 1, 4, ColorInt16 ); - - addVkFormatDesc( R16_UNORM, R16Unorm, Invalid, UShortNormalized, UShort2Normalized, 1, 1, 2, ColorFloat ); - addVkFormatDesc( R16_SNORM, R16Snorm, Invalid, ShortNormalized, Short2Normalized, 1, 1, 2, ColorFloat ); - addVkFormatDesc( R16_USCALED, Invalid, Invalid, UShort, UShort2, 1, 1, 2, ColorFloat ); - addVkFormatDesc( R16_SSCALED, Invalid, Invalid, Short, Short2, 1, 1, 2, ColorFloat ); - addVkFormatDesc( R16_UINT, R16Uint, Invalid, UShort, UShort2, 1, 1, 2, ColorUInt16 ); - addVkFormatDesc( R16_SINT, R16Sint, Invalid, Short, Short2, 1, 1, 2, ColorInt16 ); - addVkFormatDesc( R16_SFLOAT, R16Float, Invalid, Half, Half2, 1, 1, 2, ColorFloat ); - - addVkFormatDesc( R16G16_UNORM, RG16Unorm, Invalid, UShort2Normalized, Invalid, 1, 1, 4, ColorFloat ); - addVkFormatDesc( R16G16_SNORM, RG16Snorm, Invalid, Short2Normalized, Invalid, 1, 1, 4, ColorFloat ); - addVkFormatDesc( R16G16_USCALED, Invalid, Invalid, UShort2, Invalid, 1, 1, 4, ColorFloat ); - addVkFormatDesc( R16G16_SSCALED, Invalid, Invalid, Short2, Invalid, 1, 1, 4, ColorFloat ); - addVkFormatDesc( R16G16_UINT, RG16Uint, Invalid, UShort2, Invalid, 1, 1, 4, ColorUInt16 ); - addVkFormatDesc( R16G16_SINT, RG16Sint, Invalid, Short2, Invalid, 1, 1, 4, ColorInt16 ); - addVkFormatDesc( R16G16_SFLOAT, RG16Float, Invalid, Half2, Invalid, 1, 1, 4, ColorFloat ); - - addVkFormatDesc( R16G16B16_UNORM, Invalid, Invalid, UShort3Normalized, Invalid, 1, 1, 6, ColorFloat ); - addVkFormatDesc( R16G16B16_SNORM, Invalid, Invalid, Short3Normalized, Invalid, 1, 1, 6, ColorFloat ); - addVkFormatDesc( R16G16B16_USCALED, Invalid, Invalid, UShort3, Invalid, 1, 1, 6, ColorFloat ); - addVkFormatDesc( R16G16B16_SSCALED, Invalid, Invalid, Short3, Invalid, 1, 1, 6, ColorFloat ); - addVkFormatDesc( R16G16B16_UINT, Invalid, Invalid, UShort3, Invalid, 1, 1, 6, ColorUInt16 ); - addVkFormatDesc( R16G16B16_SINT, Invalid, Invalid, Short3, Invalid, 1, 1, 6, ColorInt16 ); - addVkFormatDesc( R16G16B16_SFLOAT, Invalid, Invalid, Half3, Invalid, 1, 1, 6, ColorFloat ); - - addVkFormatDesc( R16G16B16A16_UNORM, RGBA16Unorm, Invalid, UShort4Normalized, Invalid, 1, 1, 8, ColorFloat ); - addVkFormatDesc( R16G16B16A16_SNORM, RGBA16Snorm, Invalid, Short4Normalized, Invalid, 1, 1, 8, ColorFloat ); - addVkFormatDesc( R16G16B16A16_USCALED, Invalid, Invalid, UShort4, Invalid, 1, 1, 8, ColorFloat ); - addVkFormatDesc( R16G16B16A16_SSCALED, Invalid, Invalid, Short4, Invalid, 1, 1, 8, ColorFloat ); - addVkFormatDesc( R16G16B16A16_UINT, RGBA16Uint, Invalid, UShort4, Invalid, 1, 1, 8, ColorUInt16 ); - addVkFormatDesc( R16G16B16A16_SINT, RGBA16Sint, Invalid, Short4, Invalid, 1, 1, 8, ColorInt16 ); - addVkFormatDesc( R16G16B16A16_SFLOAT, RGBA16Float, Invalid, Half4, Invalid, 1, 1, 8, ColorFloat ); - - addVkFormatDesc( R32_UINT, R32Uint, Invalid, UInt, Invalid, 1, 1, 4, ColorUInt32 ); - addVkFormatDesc( R32_SINT, R32Sint, Invalid, Int, Invalid, 1, 1, 4, ColorInt32 ); - addVkFormatDesc( R32_SFLOAT, R32Float, Invalid, Float, Invalid, 1, 1, 4, ColorFloat ); - - addVkFormatDesc( R32G32_UINT, RG32Uint, Invalid, UInt2, Invalid, 1, 1, 8, ColorUInt32 ); - addVkFormatDesc( R32G32_SINT, RG32Sint, Invalid, Int2, Invalid, 1, 1, 8, ColorInt32 ); - addVkFormatDesc( R32G32_SFLOAT, RG32Float, Invalid, Float2, Invalid, 1, 1, 8, ColorFloat ); - - addVkFormatDesc( R32G32B32_UINT, Invalid, Invalid, UInt3, Invalid, 1, 1, 12, ColorUInt32 ); - addVkFormatDesc( R32G32B32_SINT, Invalid, Invalid, Int3, Invalid, 1, 1, 12, 
ColorInt32 ); - addVkFormatDesc( R32G32B32_SFLOAT, Invalid, Invalid, Float3, Invalid, 1, 1, 12, ColorFloat ); - - addVkFormatDesc( R32G32B32A32_UINT, RGBA32Uint, Invalid, UInt4, Invalid, 1, 1, 16, ColorUInt32 ); - addVkFormatDesc( R32G32B32A32_SINT, RGBA32Sint, Invalid, Int4, Invalid, 1, 1, 16, ColorInt32 ); - addVkFormatDesc( R32G32B32A32_SFLOAT, RGBA32Float, Invalid, Float4, Invalid, 1, 1, 16, ColorFloat ); - - addVkFormatDesc( R64_UINT, Invalid, Invalid, Invalid, Invalid, 1, 1, 8, ColorFloat ); - addVkFormatDesc( R64_SINT, Invalid, Invalid, Invalid, Invalid, 1, 1, 8, ColorFloat ); - addVkFormatDesc( R64_SFLOAT, Invalid, Invalid, Invalid, Invalid, 1, 1, 8, ColorFloat ); - - addVkFormatDesc( R64G64_UINT, Invalid, Invalid, Invalid, Invalid, 1, 1, 16, ColorFloat ); - addVkFormatDesc( R64G64_SINT, Invalid, Invalid, Invalid, Invalid, 1, 1, 16, ColorFloat ); - addVkFormatDesc( R64G64_SFLOAT, Invalid, Invalid, Invalid, Invalid, 1, 1, 16, ColorFloat ); - - addVkFormatDesc( R64G64B64_UINT, Invalid, Invalid, Invalid, Invalid, 1, 1, 24, ColorFloat ); - addVkFormatDesc( R64G64B64_SINT, Invalid, Invalid, Invalid, Invalid, 1, 1, 24, ColorFloat ); - addVkFormatDesc( R64G64B64_SFLOAT, Invalid, Invalid, Invalid, Invalid, 1, 1, 24, ColorFloat ); - - addVkFormatDesc( R64G64B64A64_UINT, Invalid, Invalid, Invalid, Invalid, 1, 1, 32, ColorFloat ); - addVkFormatDesc( R64G64B64A64_SINT, Invalid, Invalid, Invalid, Invalid, 1, 1, 32, ColorFloat ); - addVkFormatDesc( R64G64B64A64_SFLOAT, Invalid, Invalid, Invalid, Invalid, 1, 1, 32, ColorFloat ); - - addVkFormatDesc( B10G11R11_UFLOAT_PACK32, RG11B10Float, Invalid, Invalid, Invalid, 1, 1, 4, ColorFloat ); - addVkFormatDesc( E5B9G9R9_UFLOAT_PACK32, RGB9E5Float, Invalid, Invalid, Invalid, 1, 1, 4, ColorFloat ); - - addVkFormatDesc( D32_SFLOAT, Depth32Float, Invalid, Invalid, Invalid, 1, 1, 4, DepthStencil ); - addVkFormatDesc( D32_SFLOAT_S8_UINT, Depth32Float_Stencil8, Invalid, Invalid, Invalid, 1, 1, 5, DepthStencil ); - - addVkFormatDesc( S8_UINT, Stencil8, Invalid, Invalid, Invalid, 1, 1, 1, DepthStencil ); - - addVkFormatDesc( D16_UNORM, Depth16Unorm, Depth32Float, Invalid, Invalid, 1, 1, 2, DepthStencil ); - addVkFormatDesc( D16_UNORM_S8_UINT, Invalid, Depth16Unorm_Stencil8, Invalid, Invalid, 1, 1, 3, DepthStencil ); - addVkFormatDesc( D24_UNORM_S8_UINT, Depth24Unorm_Stencil8, Depth32Float_Stencil8, Invalid, Invalid, 1, 1, 4, DepthStencil ); - - addVkFormatDesc( X8_D24_UNORM_PACK32, Invalid, Depth24Unorm_Stencil8, Invalid, Invalid, 1, 1, 4, DepthStencil ); - - addVkFormatDesc( BC1_RGB_UNORM_BLOCK, BC1_RGBA, Invalid, Invalid, Invalid, 4, 4, 8, Compressed ); - addVkFormatDesc( BC1_RGB_SRGB_BLOCK, BC1_RGBA_sRGB, Invalid, Invalid, Invalid, 4, 4, 8, Compressed ); - addVkFormatDesc( BC1_RGBA_UNORM_BLOCK, BC1_RGBA, Invalid, Invalid, Invalid, 4, 4, 8, Compressed ); - addVkFormatDesc( BC1_RGBA_SRGB_BLOCK, BC1_RGBA_sRGB, Invalid, Invalid, Invalid, 4, 4, 8, Compressed ); - - addVkFormatDesc( BC2_UNORM_BLOCK, BC2_RGBA, Invalid, Invalid, Invalid, 4, 4, 16, Compressed ); - addVkFormatDesc( BC2_SRGB_BLOCK, BC2_RGBA_sRGB, Invalid, Invalid, Invalid, 4, 4, 16, Compressed ); - - addVkFormatDesc( BC3_UNORM_BLOCK, BC3_RGBA, Invalid, Invalid, Invalid, 4, 4, 16, Compressed ); - addVkFormatDesc( BC3_SRGB_BLOCK, BC3_RGBA_sRGB, Invalid, Invalid, Invalid, 4, 4, 16, Compressed ); - - addVkFormatDesc( BC4_UNORM_BLOCK, BC4_RUnorm, Invalid, Invalid, Invalid, 4, 4, 8, Compressed ); - addVkFormatDesc( BC4_SNORM_BLOCK, BC4_RSnorm, Invalid, Invalid, Invalid, 4, 4, 8, Compressed ); - - 
addVkFormatDesc( BC5_UNORM_BLOCK, BC5_RGUnorm, Invalid, Invalid, Invalid, 4, 4, 16, Compressed ); - addVkFormatDesc( BC5_SNORM_BLOCK, BC5_RGSnorm, Invalid, Invalid, Invalid, 4, 4, 16, Compressed ); - - addVkFormatDesc( BC6H_UFLOAT_BLOCK, BC6H_RGBUfloat, Invalid, Invalid, Invalid, 4, 4, 16, Compressed ); - addVkFormatDesc( BC6H_SFLOAT_BLOCK, BC6H_RGBFloat, Invalid, Invalid, Invalid, 4, 4, 16, Compressed ); - - addVkFormatDesc( BC7_UNORM_BLOCK, BC7_RGBAUnorm, Invalid, Invalid, Invalid, 4, 4, 16, Compressed ); - addVkFormatDesc( BC7_SRGB_BLOCK, BC7_RGBAUnorm_sRGB, Invalid, Invalid, Invalid, 4, 4, 16, Compressed ); - - addVkFormatDesc( ETC2_R8G8B8_UNORM_BLOCK, ETC2_RGB8, Invalid, Invalid, Invalid, 4, 4, 8, Compressed ); - addVkFormatDesc( ETC2_R8G8B8_SRGB_BLOCK, ETC2_RGB8_sRGB, Invalid, Invalid, Invalid, 4, 4, 8, Compressed ); - addVkFormatDesc( ETC2_R8G8B8A1_UNORM_BLOCK, ETC2_RGB8A1, Invalid, Invalid, Invalid, 4, 4, 8, Compressed ); - addVkFormatDesc( ETC2_R8G8B8A1_SRGB_BLOCK, ETC2_RGB8A1_sRGB, Invalid, Invalid, Invalid, 4, 4, 8, Compressed ); - - addVkFormatDesc( ETC2_R8G8B8A8_UNORM_BLOCK, EAC_RGBA8, Invalid, Invalid, Invalid, 4, 4, 16, Compressed ); - addVkFormatDesc( ETC2_R8G8B8A8_SRGB_BLOCK, EAC_RGBA8_sRGB, Invalid, Invalid, Invalid, 4, 4, 16, Compressed ); - - addVkFormatDesc( EAC_R11_UNORM_BLOCK, EAC_R11Unorm, Invalid, Invalid, Invalid, 4, 4, 8, Compressed ); - addVkFormatDesc( EAC_R11_SNORM_BLOCK, EAC_R11Snorm, Invalid, Invalid, Invalid, 4, 4, 8, Compressed ); - - addVkFormatDesc( EAC_R11G11_UNORM_BLOCK, EAC_RG11Unorm, Invalid, Invalid, Invalid, 4, 4, 16, Compressed ); - addVkFormatDesc( EAC_R11G11_SNORM_BLOCK, EAC_RG11Snorm, Invalid, Invalid, Invalid, 4, 4, 16, Compressed ); - - addVkFormatDesc( ASTC_4x4_UNORM_BLOCK, ASTC_4x4_LDR, Invalid, Invalid, Invalid, 4, 4, 16, Compressed ); - addVkFormatDesc( ASTC_4x4_SFLOAT_BLOCK_EXT, ASTC_4x4_HDR, Invalid, Invalid, Invalid, 4, 4, 16, Compressed ); - addVkFormatDesc( ASTC_4x4_SRGB_BLOCK, ASTC_4x4_sRGB, Invalid, Invalid, Invalid, 4, 4, 16, Compressed ); - addVkFormatDesc( ASTC_5x4_UNORM_BLOCK, ASTC_5x4_LDR, Invalid, Invalid, Invalid, 5, 4, 16, Compressed ); - addVkFormatDesc( ASTC_5x4_SFLOAT_BLOCK_EXT, ASTC_5x4_HDR, Invalid, Invalid, Invalid, 5, 4, 16, Compressed ); - addVkFormatDesc( ASTC_5x4_SRGB_BLOCK, ASTC_5x4_sRGB, Invalid, Invalid, Invalid, 5, 4, 16, Compressed ); - addVkFormatDesc( ASTC_5x5_UNORM_BLOCK, ASTC_5x5_LDR, Invalid, Invalid, Invalid, 5, 5, 16, Compressed ); - addVkFormatDesc( ASTC_5x5_SFLOAT_BLOCK_EXT, ASTC_5x5_HDR, Invalid, Invalid, Invalid, 5, 5, 16, Compressed ); - addVkFormatDesc( ASTC_5x5_SRGB_BLOCK, ASTC_5x5_sRGB, Invalid, Invalid, Invalid, 5, 5, 16, Compressed ); - addVkFormatDesc( ASTC_6x5_UNORM_BLOCK, ASTC_6x5_LDR, Invalid, Invalid, Invalid, 6, 5, 16, Compressed ); - addVkFormatDesc( ASTC_6x5_SFLOAT_BLOCK_EXT, ASTC_6x5_HDR, Invalid, Invalid, Invalid, 6, 5, 16, Compressed ); - addVkFormatDesc( ASTC_6x5_SRGB_BLOCK, ASTC_6x5_sRGB, Invalid, Invalid, Invalid, 6, 5, 16, Compressed ); - addVkFormatDesc( ASTC_6x6_UNORM_BLOCK, ASTC_6x6_LDR, Invalid, Invalid, Invalid, 6, 6, 16, Compressed ); - addVkFormatDesc( ASTC_6x6_SFLOAT_BLOCK_EXT, ASTC_6x6_HDR, Invalid, Invalid, Invalid, 6, 6, 16, Compressed ); - addVkFormatDesc( ASTC_6x6_SRGB_BLOCK, ASTC_6x6_sRGB, Invalid, Invalid, Invalid, 6, 6, 16, Compressed ); - addVkFormatDesc( ASTC_8x5_UNORM_BLOCK, ASTC_8x5_LDR, Invalid, Invalid, Invalid, 8, 5, 16, Compressed ); - addVkFormatDesc( ASTC_8x5_SFLOAT_BLOCK_EXT, ASTC_8x5_HDR, Invalid, Invalid, Invalid, 8, 5, 16, Compressed ); - 
addVkFormatDesc( ASTC_8x5_SRGB_BLOCK, ASTC_8x5_sRGB, Invalid, Invalid, Invalid, 8, 5, 16, Compressed ); - addVkFormatDesc( ASTC_8x6_UNORM_BLOCK, ASTC_8x6_LDR, Invalid, Invalid, Invalid, 8, 6, 16, Compressed ); - addVkFormatDesc( ASTC_8x6_SFLOAT_BLOCK_EXT, ASTC_8x6_HDR, Invalid, Invalid, Invalid, 8, 6, 16, Compressed ); - addVkFormatDesc( ASTC_8x6_SRGB_BLOCK, ASTC_8x6_sRGB, Invalid, Invalid, Invalid, 8, 6, 16, Compressed ); - addVkFormatDesc( ASTC_8x8_UNORM_BLOCK, ASTC_8x8_LDR, Invalid, Invalid, Invalid, 8, 8, 16, Compressed ); - addVkFormatDesc( ASTC_8x8_SFLOAT_BLOCK_EXT, ASTC_8x8_HDR, Invalid, Invalid, Invalid, 8, 8, 16, Compressed ); - addVkFormatDesc( ASTC_8x8_SRGB_BLOCK, ASTC_8x8_sRGB, Invalid, Invalid, Invalid, 8, 8, 16, Compressed ); - addVkFormatDesc( ASTC_10x5_UNORM_BLOCK, ASTC_10x5_LDR, Invalid, Invalid, Invalid, 10, 5, 16, Compressed ); - addVkFormatDesc( ASTC_10x5_SFLOAT_BLOCK_EXT, ASTC_10x5_HDR, Invalid, Invalid, Invalid, 10, 5, 16, Compressed ); - addVkFormatDesc( ASTC_10x5_SRGB_BLOCK, ASTC_10x5_sRGB, Invalid, Invalid, Invalid, 10, 5, 16, Compressed ); - addVkFormatDesc( ASTC_10x6_UNORM_BLOCK, ASTC_10x6_LDR, Invalid, Invalid, Invalid, 10, 6, 16, Compressed ); - addVkFormatDesc( ASTC_10x6_SFLOAT_BLOCK_EXT, ASTC_10x6_HDR, Invalid, Invalid, Invalid, 10, 6, 16, Compressed ); - addVkFormatDesc( ASTC_10x6_SRGB_BLOCK, ASTC_10x6_sRGB, Invalid, Invalid, Invalid, 10, 6, 16, Compressed ); - addVkFormatDesc( ASTC_10x8_UNORM_BLOCK, ASTC_10x8_LDR, Invalid, Invalid, Invalid, 10, 8, 16, Compressed ); - addVkFormatDesc( ASTC_10x8_SFLOAT_BLOCK_EXT, ASTC_10x8_HDR, Invalid, Invalid, Invalid, 10, 8, 16, Compressed ); - addVkFormatDesc( ASTC_10x8_SRGB_BLOCK, ASTC_10x8_sRGB, Invalid, Invalid, Invalid, 10, 8, 16, Compressed ); - addVkFormatDesc( ASTC_10x10_UNORM_BLOCK, ASTC_10x10_LDR, Invalid, Invalid, Invalid, 10, 10, 16, Compressed ); - addVkFormatDesc( ASTC_10x10_SFLOAT_BLOCK_EXT, ASTC_10x10_HDR, Invalid, Invalid, Invalid, 10, 10, 16, Compressed ); - addVkFormatDesc( ASTC_10x10_SRGB_BLOCK, ASTC_10x10_sRGB, Invalid, Invalid, Invalid, 10, 10, 16, Compressed ); - addVkFormatDesc( ASTC_12x10_UNORM_BLOCK, ASTC_12x10_LDR, Invalid, Invalid, Invalid, 12, 10, 16, Compressed ); - addVkFormatDesc( ASTC_12x10_SFLOAT_BLOCK_EXT, ASTC_12x10_HDR, Invalid, Invalid, Invalid, 12, 10, 16, Compressed ); - addVkFormatDesc( ASTC_12x10_SRGB_BLOCK, ASTC_12x10_sRGB, Invalid, Invalid, Invalid, 12, 10, 16, Compressed ); - addVkFormatDesc( ASTC_12x12_UNORM_BLOCK, ASTC_12x12_LDR, Invalid, Invalid, Invalid, 12, 12, 16, Compressed ); - addVkFormatDesc( ASTC_12x12_SFLOAT_BLOCK_EXT, ASTC_12x12_HDR, Invalid, Invalid, Invalid, 12, 12, 16, Compressed ); - addVkFormatDesc( ASTC_12x12_SRGB_BLOCK, ASTC_12x12_sRGB, Invalid, Invalid, Invalid, 12, 12, 16, Compressed ); + addVkFormatDesc( UNDEFINED, Invalid, Invalid, Invalid, 1, 1, 0, None ); + + addVkFormatDesc( R4G4_UNORM_PACK8, Invalid, Invalid, Invalid, 1, 1, 1, ColorFloat ); + addVkFormatDesc( R4G4B4A4_UNORM_PACK16, ABGR4Unorm, Invalid, Invalid, 1, 1, 2, ColorFloat ); + addVkFormatDesc( B4G4R4A4_UNORM_PACK16, Invalid, Invalid, Invalid, 1, 1, 2, ColorFloat ); + addVkFormatDescSwizzled( A4R4G4B4_UNORM_PACK16, ABGR4Unorm, Invalid, Invalid, 1, 1, 2, ColorFloat, G, B, A, R ); + addVkFormatDescSwizzled( A4B4G4R4_UNORM_PACK16, ABGR4Unorm, Invalid, Invalid, 1, 1, 2, ColorFloat, A, B, G, R ); + + addVkFormatDesc( R5G6B5_UNORM_PACK16, B5G6R5Unorm, Invalid, Invalid, 1, 1, 2, ColorFloat ); + addVkFormatDesc( B5G6R5_UNORM_PACK16, Invalid, Invalid, Invalid, 1, 1, 2, ColorFloat ); + addVkFormatDesc( 
R5G5B5A1_UNORM_PACK16, A1BGR5Unorm, Invalid, Invalid, 1, 1, 2, ColorFloat ); + addVkFormatDesc( B5G5R5A1_UNORM_PACK16, Invalid, Invalid, Invalid, 1, 1, 2, ColorFloat ); + addVkFormatDesc( A1R5G5B5_UNORM_PACK16, BGR5A1Unorm, Invalid, Invalid, 1, 1, 2, ColorFloat ); + + addVkFormatDesc( R8_UNORM, R8Unorm, Invalid, UCharNormalized, 1, 1, 1, ColorFloat ); + addVkFormatDesc( R8_SNORM, R8Snorm, Invalid, CharNormalized, 1, 1, 1, ColorFloat ); + addVkFormatDesc( R8_USCALED, Invalid, Invalid, UChar, 1, 1, 1, ColorFloat ); + addVkFormatDesc( R8_SSCALED, Invalid, Invalid, Char, 1, 1, 1, ColorFloat ); + addVkFormatDesc( R8_UINT, R8Uint, Invalid, UChar, 1, 1, 1, ColorUInt8 ); + addVkFormatDesc( R8_SINT, R8Sint, Invalid, Char, 1, 1, 1, ColorInt8 ); + addVkFormatDesc( R8_SRGB, R8Unorm_sRGB, Invalid, UCharNormalized, 1, 1, 1, ColorFloat ); + + addVkFormatDesc( R8G8_UNORM, RG8Unorm, Invalid, UChar2Normalized, 1, 1, 2, ColorFloat ); + addVkFormatDesc( R8G8_SNORM, RG8Snorm, Invalid, Char2Normalized, 1, 1, 2, ColorFloat ); + addVkFormatDesc( R8G8_USCALED, Invalid, Invalid, UChar2, 1, 1, 2, ColorFloat ); + addVkFormatDesc( R8G8_SSCALED, Invalid, Invalid, Char2, 1, 1, 2, ColorFloat ); + addVkFormatDesc( R8G8_UINT, RG8Uint, Invalid, UChar2, 1, 1, 2, ColorUInt8 ); + addVkFormatDesc( R8G8_SINT, RG8Sint, Invalid, Char2, 1, 1, 2, ColorInt8 ); + addVkFormatDesc( R8G8_SRGB, RG8Unorm_sRGB, Invalid, UChar2Normalized, 1, 1, 2, ColorFloat ); + + addVkFormatDesc( R8G8B8_UNORM, Invalid, Invalid, UChar3Normalized, 1, 1, 3, ColorFloat ); + addVkFormatDesc( R8G8B8_SNORM, Invalid, Invalid, Char3Normalized, 1, 1, 3, ColorFloat ); + addVkFormatDesc( R8G8B8_USCALED, Invalid, Invalid, UChar3, 1, 1, 3, ColorFloat ); + addVkFormatDesc( R8G8B8_SSCALED, Invalid, Invalid, Char3, 1, 1, 3, ColorFloat ); + addVkFormatDesc( R8G8B8_UINT, Invalid, Invalid, UChar3, 1, 1, 3, ColorUInt8 ); + addVkFormatDesc( R8G8B8_SINT, Invalid, Invalid, Char3, 1, 1, 3, ColorInt8 ); + addVkFormatDesc( R8G8B8_SRGB, Invalid, Invalid, UChar3Normalized, 1, 1, 3, ColorFloat ); + + addVkFormatDesc( B8G8R8_UNORM, Invalid, Invalid, Invalid, 1, 1, 3, ColorFloat ); + addVkFormatDesc( B8G8R8_SNORM, Invalid, Invalid, Invalid, 1, 1, 3, ColorFloat ); + addVkFormatDesc( B8G8R8_USCALED, Invalid, Invalid, Invalid, 1, 1, 3, ColorFloat ); + addVkFormatDesc( B8G8R8_SSCALED, Invalid, Invalid, Invalid, 1, 1, 3, ColorFloat ); + addVkFormatDesc( B8G8R8_UINT, Invalid, Invalid, Invalid, 1, 1, 3, ColorUInt8 ); + addVkFormatDesc( B8G8R8_SINT, Invalid, Invalid, Invalid, 1, 1, 3, ColorInt8 ); + addVkFormatDesc( B8G8R8_SRGB, Invalid, Invalid, Invalid, 1, 1, 3, ColorFloat ); + + addVkFormatDesc( R8G8B8A8_UNORM, RGBA8Unorm, Invalid, UChar4Normalized, 1, 1, 4, ColorFloat ); + addVkFormatDesc( R8G8B8A8_SNORM, RGBA8Snorm, Invalid, Char4Normalized, 1, 1, 4, ColorFloat ); + addVkFormatDesc( R8G8B8A8_USCALED, Invalid, Invalid, UChar4, 1, 1, 4, ColorFloat ); + addVkFormatDesc( R8G8B8A8_SSCALED, Invalid, Invalid, Char4, 1, 1, 4, ColorFloat ); + addVkFormatDesc( R8G8B8A8_UINT, RGBA8Uint, Invalid, UChar4, 1, 1, 4, ColorUInt8 ); + addVkFormatDesc( R8G8B8A8_SINT, RGBA8Sint, Invalid, Char4, 1, 1, 4, ColorInt8 ); + addVkFormatDesc( R8G8B8A8_SRGB, RGBA8Unorm_sRGB, Invalid, UChar4Normalized, 1, 1, 4, ColorFloat ); + + addVkFormatDesc( B8G8R8A8_UNORM, BGRA8Unorm, Invalid, UChar4Normalized_BGRA, 1, 1, 4, ColorFloat ); + addVkFormatDesc( B8G8R8A8_SNORM, Invalid, Invalid, Invalid, 1, 1, 4, ColorFloat ); + addVkFormatDesc( B8G8R8A8_USCALED, Invalid, Invalid, Invalid, 1, 1, 4, ColorFloat ); + addVkFormatDesc( 
B8G8R8A8_SSCALED, Invalid, Invalid, Invalid, 1, 1, 4, ColorFloat ); + addVkFormatDesc( B8G8R8A8_UINT, Invalid, Invalid, Invalid, 1, 1, 4, ColorUInt8 ); + addVkFormatDesc( B8G8R8A8_SINT, Invalid, Invalid, Invalid, 1, 1, 4, ColorInt8 ); + addVkFormatDesc( B8G8R8A8_SRGB, BGRA8Unorm_sRGB, Invalid, Invalid, 1, 1, 4, ColorFloat ); + + addVkFormatDesc( A8B8G8R8_UNORM_PACK32, RGBA8Unorm, Invalid, UChar4Normalized, 1, 1, 4, ColorFloat ); + addVkFormatDesc( A8B8G8R8_SNORM_PACK32, RGBA8Snorm, Invalid, Char4Normalized, 1, 1, 4, ColorFloat ); + addVkFormatDesc( A8B8G8R8_USCALED_PACK32, Invalid, Invalid, UChar4, 1, 1, 4, ColorFloat ); + addVkFormatDesc( A8B8G8R8_SSCALED_PACK32, Invalid, Invalid, Char4, 1, 1, 4, ColorFloat ); + addVkFormatDesc( A8B8G8R8_UINT_PACK32, RGBA8Uint, Invalid, UChar4, 1, 1, 4, ColorUInt8 ); + addVkFormatDesc( A8B8G8R8_SINT_PACK32, RGBA8Sint, Invalid, Char4, 1, 1, 4, ColorInt8 ); + addVkFormatDesc( A8B8G8R8_SRGB_PACK32, RGBA8Unorm_sRGB, Invalid, UChar4Normalized, 1, 1, 4, ColorFloat ); + + addVkFormatDesc( A2R10G10B10_UNORM_PACK32, BGR10A2Unorm, Invalid, Invalid, 1, 1, 4, ColorFloat ); + addVkFormatDesc( A2R10G10B10_SNORM_PACK32, Invalid, Invalid, Invalid, 1, 1, 4, ColorFloat ); + addVkFormatDesc( A2R10G10B10_USCALED_PACK32, Invalid, Invalid, Invalid, 1, 1, 4, ColorFloat ); + addVkFormatDesc( A2R10G10B10_SSCALED_PACK32, Invalid, Invalid, Invalid, 1, 1, 4, ColorFloat ); + addVkFormatDesc( A2R10G10B10_UINT_PACK32, Invalid, Invalid, Invalid, 1, 1, 4, ColorUInt16 ); + addVkFormatDesc( A2R10G10B10_SINT_PACK32, Invalid, Invalid, Invalid, 1, 1, 4, ColorInt16 ); + + addVkFormatDesc( A2B10G10R10_UNORM_PACK32, RGB10A2Unorm, Invalid, UInt1010102Normalized, 1, 1, 4, ColorFloat ); + addVkFormatDesc( A2B10G10R10_SNORM_PACK32, Invalid, Invalid, Int1010102Normalized, 1, 1, 4, ColorFloat ); + addVkFormatDesc( A2B10G10R10_USCALED_PACK32, Invalid, Invalid, Invalid, 1, 1, 4, ColorFloat ); + addVkFormatDesc( A2B10G10R10_SSCALED_PACK32, Invalid, Invalid, Invalid, 1, 1, 4, ColorFloat ); + addVkFormatDesc( A2B10G10R10_UINT_PACK32, RGB10A2Uint, Invalid, Invalid, 1, 1, 4, ColorUInt16 ); + addVkFormatDesc( A2B10G10R10_SINT_PACK32, Invalid, Invalid, Invalid, 1, 1, 4, ColorInt16 ); + + addVkFormatDesc( R16_UNORM, R16Unorm, Invalid, UShortNormalized, 1, 1, 2, ColorFloat ); + addVkFormatDesc( R16_SNORM, R16Snorm, Invalid, ShortNormalized, 1, 1, 2, ColorFloat ); + addVkFormatDesc( R16_USCALED, Invalid, Invalid, UShort, 1, 1, 2, ColorFloat ); + addVkFormatDesc( R16_SSCALED, Invalid, Invalid, Short, 1, 1, 2, ColorFloat ); + addVkFormatDesc( R16_UINT, R16Uint, Invalid, UShort, 1, 1, 2, ColorUInt16 ); + addVkFormatDesc( R16_SINT, R16Sint, Invalid, Short, 1, 1, 2, ColorInt16 ); + addVkFormatDesc( R16_SFLOAT, R16Float, Invalid, Half, 1, 1, 2, ColorFloat ); + + addVkFormatDesc( R16G16_UNORM, RG16Unorm, Invalid, UShort2Normalized, 1, 1, 4, ColorFloat ); + addVkFormatDesc( R16G16_SNORM, RG16Snorm, Invalid, Short2Normalized, 1, 1, 4, ColorFloat ); + addVkFormatDesc( R16G16_USCALED, Invalid, Invalid, UShort2, 1, 1, 4, ColorFloat ); + addVkFormatDesc( R16G16_SSCALED, Invalid, Invalid, Short2, 1, 1, 4, ColorFloat ); + addVkFormatDesc( R16G16_UINT, RG16Uint, Invalid, UShort2, 1, 1, 4, ColorUInt16 ); + addVkFormatDesc( R16G16_SINT, RG16Sint, Invalid, Short2, 1, 1, 4, ColorInt16 ); + addVkFormatDesc( R16G16_SFLOAT, RG16Float, Invalid, Half2, 1, 1, 4, ColorFloat ); + + addVkFormatDesc( R16G16B16_UNORM, Invalid, Invalid, UShort3Normalized, 1, 1, 6, ColorFloat ); + addVkFormatDesc( R16G16B16_SNORM, Invalid, Invalid, 
Short3Normalized, 1, 1, 6, ColorFloat ); + addVkFormatDesc( R16G16B16_USCALED, Invalid, Invalid, UShort3, 1, 1, 6, ColorFloat ); + addVkFormatDesc( R16G16B16_SSCALED, Invalid, Invalid, Short3, 1, 1, 6, ColorFloat ); + addVkFormatDesc( R16G16B16_UINT, Invalid, Invalid, UShort3, 1, 1, 6, ColorUInt16 ); + addVkFormatDesc( R16G16B16_SINT, Invalid, Invalid, Short3, 1, 1, 6, ColorInt16 ); + addVkFormatDesc( R16G16B16_SFLOAT, Invalid, Invalid, Half3, 1, 1, 6, ColorFloat ); + + addVkFormatDesc( R16G16B16A16_UNORM, RGBA16Unorm, Invalid, UShort4Normalized, 1, 1, 8, ColorFloat ); + addVkFormatDesc( R16G16B16A16_SNORM, RGBA16Snorm, Invalid, Short4Normalized, 1, 1, 8, ColorFloat ); + addVkFormatDesc( R16G16B16A16_USCALED, Invalid, Invalid, UShort4, 1, 1, 8, ColorFloat ); + addVkFormatDesc( R16G16B16A16_SSCALED, Invalid, Invalid, Short4, 1, 1, 8, ColorFloat ); + addVkFormatDesc( R16G16B16A16_UINT, RGBA16Uint, Invalid, UShort4, 1, 1, 8, ColorUInt16 ); + addVkFormatDesc( R16G16B16A16_SINT, RGBA16Sint, Invalid, Short4, 1, 1, 8, ColorInt16 ); + addVkFormatDesc( R16G16B16A16_SFLOAT, RGBA16Float, Invalid, Half4, 1, 1, 8, ColorFloat ); + + addVkFormatDesc( R32_UINT, R32Uint, Invalid, UInt, 1, 1, 4, ColorUInt32 ); + addVkFormatDesc( R32_SINT, R32Sint, Invalid, Int, 1, 1, 4, ColorInt32 ); + addVkFormatDesc( R32_SFLOAT, R32Float, Invalid, Float, 1, 1, 4, ColorFloat ); + + addVkFormatDesc( R32G32_UINT, RG32Uint, Invalid, UInt2, 1, 1, 8, ColorUInt32 ); + addVkFormatDesc( R32G32_SINT, RG32Sint, Invalid, Int2, 1, 1, 8, ColorInt32 ); + addVkFormatDesc( R32G32_SFLOAT, RG32Float, Invalid, Float2, 1, 1, 8, ColorFloat ); + + addVkFormatDesc( R32G32B32_UINT, Invalid, Invalid, UInt3, 1, 1, 12, ColorUInt32 ); + addVkFormatDesc( R32G32B32_SINT, Invalid, Invalid, Int3, 1, 1, 12, ColorInt32 ); + addVkFormatDesc( R32G32B32_SFLOAT, Invalid, Invalid, Float3, 1, 1, 12, ColorFloat ); + + addVkFormatDesc( R32G32B32A32_UINT, RGBA32Uint, Invalid, UInt4, 1, 1, 16, ColorUInt32 ); + addVkFormatDesc( R32G32B32A32_SINT, RGBA32Sint, Invalid, Int4, 1, 1, 16, ColorInt32 ); + addVkFormatDesc( R32G32B32A32_SFLOAT, RGBA32Float, Invalid, Float4, 1, 1, 16, ColorFloat ); + + addVkFormatDesc( R64_UINT, Invalid, Invalid, Invalid, 1, 1, 8, ColorFloat ); + addVkFormatDesc( R64_SINT, Invalid, Invalid, Invalid, 1, 1, 8, ColorFloat ); + addVkFormatDesc( R64_SFLOAT, Invalid, Invalid, Invalid, 1, 1, 8, ColorFloat ); + + addVkFormatDesc( R64G64_UINT, Invalid, Invalid, Invalid, 1, 1, 16, ColorFloat ); + addVkFormatDesc( R64G64_SINT, Invalid, Invalid, Invalid, 1, 1, 16, ColorFloat ); + addVkFormatDesc( R64G64_SFLOAT, Invalid, Invalid, Invalid, 1, 1, 16, ColorFloat ); + + addVkFormatDesc( R64G64B64_UINT, Invalid, Invalid, Invalid, 1, 1, 24, ColorFloat ); + addVkFormatDesc( R64G64B64_SINT, Invalid, Invalid, Invalid, 1, 1, 24, ColorFloat ); + addVkFormatDesc( R64G64B64_SFLOAT, Invalid, Invalid, Invalid, 1, 1, 24, ColorFloat ); + + addVkFormatDesc( R64G64B64A64_UINT, Invalid, Invalid, Invalid, 1, 1, 32, ColorFloat ); + addVkFormatDesc( R64G64B64A64_SINT, Invalid, Invalid, Invalid, 1, 1, 32, ColorFloat ); + addVkFormatDesc( R64G64B64A64_SFLOAT, Invalid, Invalid, Invalid, 1, 1, 32, ColorFloat ); + + addVkFormatDesc( B10G11R11_UFLOAT_PACK32, RG11B10Float, Invalid, Invalid, 1, 1, 4, ColorFloat ); + addVkFormatDesc( E5B9G9R9_UFLOAT_PACK32, RGB9E5Float, Invalid, Invalid, 1, 1, 4, ColorFloat ); + + addVkFormatDesc( D32_SFLOAT, Depth32Float, Invalid, Invalid, 1, 1, 4, DepthStencil ); + addVkFormatDesc( D32_SFLOAT_S8_UINT, Depth32Float_Stencil8, Invalid, Invalid, 1, 1, 5, 
DepthStencil ); + + addVkFormatDesc( S8_UINT, Stencil8, Invalid, Invalid, 1, 1, 1, DepthStencil ); + + addVkFormatDesc( D16_UNORM, Depth16Unorm, Depth32Float, Invalid, 1, 1, 2, DepthStencil ); + addVkFormatDesc( D16_UNORM_S8_UINT, Invalid, Depth16Unorm_Stencil8, Invalid, 1, 1, 3, DepthStencil ); + addVkFormatDesc( D24_UNORM_S8_UINT, Depth24Unorm_Stencil8, Depth32Float_Stencil8, Invalid, 1, 1, 4, DepthStencil ); + + addVkFormatDesc( X8_D24_UNORM_PACK32, Invalid, Depth24Unorm_Stencil8, Invalid, 1, 1, 4, DepthStencil ); + + addVkFormatDesc( BC1_RGB_UNORM_BLOCK, BC1_RGBA, Invalid, Invalid, 4, 4, 8, Compressed ); + addVkFormatDesc( BC1_RGB_SRGB_BLOCK, BC1_RGBA_sRGB, Invalid, Invalid, 4, 4, 8, Compressed ); + addVkFormatDesc( BC1_RGBA_UNORM_BLOCK, BC1_RGBA, Invalid, Invalid, 4, 4, 8, Compressed ); + addVkFormatDesc( BC1_RGBA_SRGB_BLOCK, BC1_RGBA_sRGB, Invalid, Invalid, 4, 4, 8, Compressed ); + + addVkFormatDesc( BC2_UNORM_BLOCK, BC2_RGBA, Invalid, Invalid, 4, 4, 16, Compressed ); + addVkFormatDesc( BC2_SRGB_BLOCK, BC2_RGBA_sRGB, Invalid, Invalid, 4, 4, 16, Compressed ); + + addVkFormatDesc( BC3_UNORM_BLOCK, BC3_RGBA, Invalid, Invalid, 4, 4, 16, Compressed ); + addVkFormatDesc( BC3_SRGB_BLOCK, BC3_RGBA_sRGB, Invalid, Invalid, 4, 4, 16, Compressed ); + + addVkFormatDesc( BC4_UNORM_BLOCK, BC4_RUnorm, Invalid, Invalid, 4, 4, 8, Compressed ); + addVkFormatDesc( BC4_SNORM_BLOCK, BC4_RSnorm, Invalid, Invalid, 4, 4, 8, Compressed ); + + addVkFormatDesc( BC5_UNORM_BLOCK, BC5_RGUnorm, Invalid, Invalid, 4, 4, 16, Compressed ); + addVkFormatDesc( BC5_SNORM_BLOCK, BC5_RGSnorm, Invalid, Invalid, 4, 4, 16, Compressed ); + + addVkFormatDesc( BC6H_UFLOAT_BLOCK, BC6H_RGBUfloat, Invalid, Invalid, 4, 4, 16, Compressed ); + addVkFormatDesc( BC6H_SFLOAT_BLOCK, BC6H_RGBFloat, Invalid, Invalid, 4, 4, 16, Compressed ); + + addVkFormatDesc( BC7_UNORM_BLOCK, BC7_RGBAUnorm, Invalid, Invalid, 4, 4, 16, Compressed ); + addVkFormatDesc( BC7_SRGB_BLOCK, BC7_RGBAUnorm_sRGB, Invalid, Invalid, 4, 4, 16, Compressed ); + + addVkFormatDesc( ETC2_R8G8B8_UNORM_BLOCK, ETC2_RGB8, Invalid, Invalid, 4, 4, 8, Compressed ); + addVkFormatDesc( ETC2_R8G8B8_SRGB_BLOCK, ETC2_RGB8_sRGB, Invalid, Invalid, 4, 4, 8, Compressed ); + addVkFormatDesc( ETC2_R8G8B8A1_UNORM_BLOCK, ETC2_RGB8A1, Invalid, Invalid, 4, 4, 8, Compressed ); + addVkFormatDesc( ETC2_R8G8B8A1_SRGB_BLOCK, ETC2_RGB8A1_sRGB, Invalid, Invalid, 4, 4, 8, Compressed ); + + addVkFormatDesc( ETC2_R8G8B8A8_UNORM_BLOCK, EAC_RGBA8, Invalid, Invalid, 4, 4, 16, Compressed ); + addVkFormatDesc( ETC2_R8G8B8A8_SRGB_BLOCK, EAC_RGBA8_sRGB, Invalid, Invalid, 4, 4, 16, Compressed ); + + addVkFormatDesc( EAC_R11_UNORM_BLOCK, EAC_R11Unorm, Invalid, Invalid, 4, 4, 8, Compressed ); + addVkFormatDesc( EAC_R11_SNORM_BLOCK, EAC_R11Snorm, Invalid, Invalid, 4, 4, 8, Compressed ); + + addVkFormatDesc( EAC_R11G11_UNORM_BLOCK, EAC_RG11Unorm, Invalid, Invalid, 4, 4, 16, Compressed ); + addVkFormatDesc( EAC_R11G11_SNORM_BLOCK, EAC_RG11Snorm, Invalid, Invalid, 4, 4, 16, Compressed ); + + addVkFormatDesc( ASTC_4x4_UNORM_BLOCK, ASTC_4x4_LDR, Invalid, Invalid, 4, 4, 16, Compressed ); + addVkFormatDesc( ASTC_4x4_SFLOAT_BLOCK_EXT, ASTC_4x4_HDR, Invalid, Invalid, 4, 4, 16, Compressed ); + addVkFormatDesc( ASTC_4x4_SRGB_BLOCK, ASTC_4x4_sRGB, Invalid, Invalid, 4, 4, 16, Compressed ); + addVkFormatDesc( ASTC_5x4_UNORM_BLOCK, ASTC_5x4_LDR, Invalid, Invalid, 5, 4, 16, Compressed ); + addVkFormatDesc( ASTC_5x4_SFLOAT_BLOCK_EXT, ASTC_5x4_HDR, Invalid, Invalid, 5, 4, 16, Compressed ); + addVkFormatDesc( ASTC_5x4_SRGB_BLOCK, 
ASTC_5x4_sRGB, Invalid, Invalid, 5, 4, 16, Compressed ); + addVkFormatDesc( ASTC_5x5_UNORM_BLOCK, ASTC_5x5_LDR, Invalid, Invalid, 5, 5, 16, Compressed ); + addVkFormatDesc( ASTC_5x5_SFLOAT_BLOCK_EXT, ASTC_5x5_HDR, Invalid, Invalid, 5, 5, 16, Compressed ); + addVkFormatDesc( ASTC_5x5_SRGB_BLOCK, ASTC_5x5_sRGB, Invalid, Invalid, 5, 5, 16, Compressed ); + addVkFormatDesc( ASTC_6x5_UNORM_BLOCK, ASTC_6x5_LDR, Invalid, Invalid, 6, 5, 16, Compressed ); + addVkFormatDesc( ASTC_6x5_SFLOAT_BLOCK_EXT, ASTC_6x5_HDR, Invalid, Invalid, 6, 5, 16, Compressed ); + addVkFormatDesc( ASTC_6x5_SRGB_BLOCK, ASTC_6x5_sRGB, Invalid, Invalid, 6, 5, 16, Compressed ); + addVkFormatDesc( ASTC_6x6_UNORM_BLOCK, ASTC_6x6_LDR, Invalid, Invalid, 6, 6, 16, Compressed ); + addVkFormatDesc( ASTC_6x6_SFLOAT_BLOCK_EXT, ASTC_6x6_HDR, Invalid, Invalid, 6, 6, 16, Compressed ); + addVkFormatDesc( ASTC_6x6_SRGB_BLOCK, ASTC_6x6_sRGB, Invalid, Invalid, 6, 6, 16, Compressed ); + addVkFormatDesc( ASTC_8x5_UNORM_BLOCK, ASTC_8x5_LDR, Invalid, Invalid, 8, 5, 16, Compressed ); + addVkFormatDesc( ASTC_8x5_SFLOAT_BLOCK_EXT, ASTC_8x5_HDR, Invalid, Invalid, 8, 5, 16, Compressed ); + addVkFormatDesc( ASTC_8x5_SRGB_BLOCK, ASTC_8x5_sRGB, Invalid, Invalid, 8, 5, 16, Compressed ); + addVkFormatDesc( ASTC_8x6_UNORM_BLOCK, ASTC_8x6_LDR, Invalid, Invalid, 8, 6, 16, Compressed ); + addVkFormatDesc( ASTC_8x6_SFLOAT_BLOCK_EXT, ASTC_8x6_HDR, Invalid, Invalid, 8, 6, 16, Compressed ); + addVkFormatDesc( ASTC_8x6_SRGB_BLOCK, ASTC_8x6_sRGB, Invalid, Invalid, 8, 6, 16, Compressed ); + addVkFormatDesc( ASTC_8x8_UNORM_BLOCK, ASTC_8x8_LDR, Invalid, Invalid, 8, 8, 16, Compressed ); + addVkFormatDesc( ASTC_8x8_SFLOAT_BLOCK_EXT, ASTC_8x8_HDR, Invalid, Invalid, 8, 8, 16, Compressed ); + addVkFormatDesc( ASTC_8x8_SRGB_BLOCK, ASTC_8x8_sRGB, Invalid, Invalid, 8, 8, 16, Compressed ); + addVkFormatDesc( ASTC_10x5_UNORM_BLOCK, ASTC_10x5_LDR, Invalid, Invalid, 10, 5, 16, Compressed ); + addVkFormatDesc( ASTC_10x5_SFLOAT_BLOCK_EXT, ASTC_10x5_HDR, Invalid, Invalid, 10, 5, 16, Compressed ); + addVkFormatDesc( ASTC_10x5_SRGB_BLOCK, ASTC_10x5_sRGB, Invalid, Invalid, 10, 5, 16, Compressed ); + addVkFormatDesc( ASTC_10x6_UNORM_BLOCK, ASTC_10x6_LDR, Invalid, Invalid, 10, 6, 16, Compressed ); + addVkFormatDesc( ASTC_10x6_SFLOAT_BLOCK_EXT, ASTC_10x6_HDR, Invalid, Invalid, 10, 6, 16, Compressed ); + addVkFormatDesc( ASTC_10x6_SRGB_BLOCK, ASTC_10x6_sRGB, Invalid, Invalid, 10, 6, 16, Compressed ); + addVkFormatDesc( ASTC_10x8_UNORM_BLOCK, ASTC_10x8_LDR, Invalid, Invalid, 10, 8, 16, Compressed ); + addVkFormatDesc( ASTC_10x8_SFLOAT_BLOCK_EXT, ASTC_10x8_HDR, Invalid, Invalid, 10, 8, 16, Compressed ); + addVkFormatDesc( ASTC_10x8_SRGB_BLOCK, ASTC_10x8_sRGB, Invalid, Invalid, 10, 8, 16, Compressed ); + addVkFormatDesc( ASTC_10x10_UNORM_BLOCK, ASTC_10x10_LDR, Invalid, Invalid, 10, 10, 16, Compressed ); + addVkFormatDesc( ASTC_10x10_SFLOAT_BLOCK_EXT, ASTC_10x10_HDR, Invalid, Invalid, 10, 10, 16, Compressed ); + addVkFormatDesc( ASTC_10x10_SRGB_BLOCK, ASTC_10x10_sRGB, Invalid, Invalid, 10, 10, 16, Compressed ); + addVkFormatDesc( ASTC_12x10_UNORM_BLOCK, ASTC_12x10_LDR, Invalid, Invalid, 12, 10, 16, Compressed ); + addVkFormatDesc( ASTC_12x10_SFLOAT_BLOCK_EXT, ASTC_12x10_HDR, Invalid, Invalid, 12, 10, 16, Compressed ); + addVkFormatDesc( ASTC_12x10_SRGB_BLOCK, ASTC_12x10_sRGB, Invalid, Invalid, 12, 10, 16, Compressed ); + addVkFormatDesc( ASTC_12x12_UNORM_BLOCK, ASTC_12x12_LDR, Invalid, Invalid, 12, 12, 16, Compressed ); + addVkFormatDesc( ASTC_12x12_SFLOAT_BLOCK_EXT, ASTC_12x12_HDR, Invalid, 
Invalid, 12, 12, 16, Compressed ); + addVkFormatDesc( ASTC_12x12_SRGB_BLOCK, ASTC_12x12_sRGB, Invalid, Invalid, 12, 12, 16, Compressed ); // Extension VK_IMG_format_pvrtc - addVkFormatDesc( PVRTC1_2BPP_UNORM_BLOCK_IMG, PVRTC_RGBA_2BPP, Invalid, Invalid, Invalid, 8, 4, 8, Compressed ); - addVkFormatDesc( PVRTC1_4BPP_UNORM_BLOCK_IMG, PVRTC_RGBA_4BPP, Invalid, Invalid, Invalid, 4, 4, 8, Compressed ); - addVkFormatDesc( PVRTC2_2BPP_UNORM_BLOCK_IMG, Invalid, Invalid, Invalid, Invalid, 8, 4, 8, Compressed ); - addVkFormatDesc( PVRTC2_4BPP_UNORM_BLOCK_IMG, Invalid, Invalid, Invalid, Invalid, 4, 4, 8, Compressed ); - addVkFormatDesc( PVRTC1_2BPP_SRGB_BLOCK_IMG, PVRTC_RGBA_2BPP_sRGB, Invalid, Invalid, Invalid, 8, 4, 8, Compressed ); - addVkFormatDesc( PVRTC1_4BPP_SRGB_BLOCK_IMG, PVRTC_RGBA_4BPP_sRGB, Invalid, Invalid, Invalid, 4, 4, 8, Compressed ); - addVkFormatDesc( PVRTC2_2BPP_SRGB_BLOCK_IMG, Invalid, Invalid, Invalid, Invalid, 8, 4, 8, Compressed ); - addVkFormatDesc( PVRTC2_4BPP_SRGB_BLOCK_IMG, Invalid, Invalid, Invalid, Invalid, 4, 4, 8, Compressed ); + addVkFormatDesc( PVRTC1_2BPP_UNORM_BLOCK_IMG, PVRTC_RGBA_2BPP, Invalid, Invalid, 8, 4, 8, Compressed ); + addVkFormatDesc( PVRTC1_4BPP_UNORM_BLOCK_IMG, PVRTC_RGBA_4BPP, Invalid, Invalid, 4, 4, 8, Compressed ); + addVkFormatDesc( PVRTC2_2BPP_UNORM_BLOCK_IMG, Invalid, Invalid, Invalid, 8, 4, 8, Compressed ); + addVkFormatDesc( PVRTC2_4BPP_UNORM_BLOCK_IMG, Invalid, Invalid, Invalid, 4, 4, 8, Compressed ); + addVkFormatDesc( PVRTC1_2BPP_SRGB_BLOCK_IMG, PVRTC_RGBA_2BPP_sRGB, Invalid, Invalid, 8, 4, 8, Compressed ); + addVkFormatDesc( PVRTC1_4BPP_SRGB_BLOCK_IMG, PVRTC_RGBA_4BPP_sRGB, Invalid, Invalid, 4, 4, 8, Compressed ); + addVkFormatDesc( PVRTC2_2BPP_SRGB_BLOCK_IMG, Invalid, Invalid, Invalid, 8, 4, 8, Compressed ); + addVkFormatDesc( PVRTC2_4BPP_SRGB_BLOCK_IMG, Invalid, Invalid, Invalid, 4, 4, 8, Compressed ); // Extension VK_KHR_sampler_ycbcr_conversion addVkFormatDescChromaSubsampling( G8B8G8R8_422_UNORM, GBGR422, 1, 8, 2, 1, 4 ); @@ -2022,10 +2022,6 @@ if ( !mtlDesc.vkFormat ) { mtlDesc.vkFormat = vkFmt; } if ( !mtlDesc.isSupported() ) { vkDesc.mtlVertexFormat = MTLVertexFormatInvalid; } } - if (vkDesc.mtlVertexFormatSubstitute) { - auto& mtlDesc = getMTLVertexFormatDesc(vkDesc.mtlVertexFormatSubstitute); - if ( !mtlDesc.isSupported() ) { vkDesc.mtlVertexFormatSubstitute = MTLVertexFormatInvalid; } - } // Set Vulkan format properties setFormatProperties(vkDesc);
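The capability gating that patch 6 introduces in modifyCapabilitiesFromPhysicalDevice() comes down to one rule: vertex formats with components wider than 32 bits stay usable as vertex buffers only when the device exposes the matching 64-bit shader feature (shaderFloat64 for float components, shaderInt64 otherwise). Below is a minimal standalone sketch of that rule; the types and names are illustrative stand-ins, not the MoltenVK or SPIRV-Cross API.

// Standalone sketch only -- SketchFormatInfo, SketchFeatures and vertexBufferUsable
// are illustrative stand-ins, not MoltenVK or SPIRV-Cross API.
#include <cstdint>
#include <cstdio>

struct SketchFormatInfo {
    uint32_t log2Align;   // log2 of component alignment: 2 -> 32-bit, 3 -> 64-bit components
    bool     isFloat;     // true for floating-point component types
};

struct SketchFeatures {
    bool shaderInt64;
    bool shaderFloat64;
};

// Components up to 32 bits are always loadable by the shader vertex loader;
// 64-bit components require the matching device feature.
static bool vertexBufferUsable(const SketchFormatInfo& info, const SketchFeatures& feats) {
    if (info.log2Align <= 2) { return true; }
    return info.isFloat ? feats.shaderFloat64 : feats.shaderInt64;
}

int main() {
    SketchFeatures feats { /*shaderInt64=*/true, /*shaderFloat64=*/false };
    SketchFormatInfo r64Uint   { 3, false };   // e.g. VK_FORMAT_R64_UINT
    SketchFormatInfo r64Sfloat { 3, true  };   // e.g. VK_FORMAT_R64_SFLOAT
    std::printf("R64_UINT   vertex buffer usable: %d\n", vertexBufferUsable(r64Uint, feats));
    std::printf("R64_SFLOAT vertex buffer usable: %d\n", vertexBufferUsable(r64Sfloat, feats));
    return 0;
}

With shaderInt64 enabled and shaderFloat64 disabled, R64_UINT would keep its vertex-buffer feature bit while R64_SFLOAT would lose it, which matches how the new code clears kMVKVkFormatFeatureFlagsBufVertex for unsupported formats.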