blob: deee792a3ec5b29e746d65cb3f585a6c29b879c8 [file] [log] [blame]
/*
* Copyright (c) 2015-2019 The Khronos Group Inc.
* Copyright (c) 2015-2019 Valve Corporation
* Copyright (c) 2015-2019 LunarG, Inc.
* Copyright (c) 2015-2019 Google, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Author: Chia-I Wu <olvaffe@gmail.com>
* Author: Chris Forbes <chrisf@ijw.co.nz>
* Author: Courtney Goeltzenleuchter <courtney@LunarG.com>
* Author: Mark Lobodzinski <mark@lunarg.com>
* Author: Mike Stroyan <mike@LunarG.com>
* Author: Tobin Ehlis <tobine@google.com>
* Author: Tony Barbour <tony@LunarG.com>
* Author: Cody Northrop <cnorthrop@google.com>
* Author: Dave Houlton <daveh@lunarg.com>
* Author: Jeremy Kniager <jeremyk@lunarg.com>
* Author: Shannon McPherson <shannon@lunarg.com>
* Author: John Zulauf <jzulauf@lunarg.com>
*/
#include "cast_utils.h"
#include "layer_validation_tests.h"
//
// POSITIVE VALIDATION TESTS
//
// These tests do not expect to encounter ANY validation errors, and pass only if this is true
TEST_F(VkPositiveLayerTest, NullFunctionPointer) {
    TEST_DESCRIPTION("On 1_0 instance , call GetDeviceProcAddr on promoted 1_1 device-level entrypoint");
    SetTargetApiVersion(VK_API_VERSION_1_0);
    ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor));
    // Use the canonical extension-name macro (consistent with the other tests in this file)
    // rather than a hand-typed string literal.
    if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME)) {
        m_device_extension_names.push_back(VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME);
    } else {
        // Fixed typo in the skip message: "reqirements" -> "requirements".
        printf("%s VK_KHR_get_memory_requirements2 extension not supported, skipping NullFunctionPointer test\n", kSkipPrefix);
        return;
    }
    ASSERT_NO_FATAL_FAILURE(InitState());
    m_errorMonitor->ExpectSuccess();
    // On a 1.0 instance, the core-promoted 1.1 name must NOT resolve; only the KHR-suffixed
    // alias from the enabled extension is valid, so a non-null return is an error.
    auto fpGetBufferMemoryRequirements =
        (PFN_vkGetBufferMemoryRequirements2)vk::GetDeviceProcAddr(m_device->device(), "vkGetBufferMemoryRequirements2");
    if (fpGetBufferMemoryRequirements) {
        m_errorMonitor->SetError("Null was expected!");
    }
    m_errorMonitor->VerifyNotFound();
}
TEST_F(VkPositiveLayerTest, SecondaryCommandBufferBarrier) {
    TEST_DESCRIPTION("Add a pipeline barrier in a secondary command buffer");
    ASSERT_NO_FATAL_FAILURE(Init());
    m_errorMonitor->ExpectSuccess();
    // A renderpass with a single subpass that declared a self-dependency
    VkAttachmentDescription attach[] = {
        {0, VK_FORMAT_R8G8B8A8_UNORM, VK_SAMPLE_COUNT_1_BIT, VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE,
         VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_IMAGE_LAYOUT_UNDEFINED,
         VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL},
    };
    VkAttachmentReference ref = {0, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL};
    VkSubpassDescription subpasses[] = {
        {0, VK_PIPELINE_BIND_POINT_GRAPHICS, 0, nullptr, 1, &ref, nullptr, nullptr, 0, nullptr},
    };
    // Self-dependency (srcSubpass == dstSubpass == 0); the barriers recorded below must match
    // these stage/access masks to be legal inside the render pass.
    VkSubpassDependency dep = {0,
                               0,
                               VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
                               VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
                               VK_ACCESS_SHADER_WRITE_BIT,
                               VK_ACCESS_SHADER_WRITE_BIT,
                               VK_DEPENDENCY_BY_REGION_BIT};
    VkRenderPassCreateInfo rpci = {VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, nullptr, 0, 1, attach, 1, subpasses, 1, &dep};
    VkRenderPass rp;
    VkResult err = vk::CreateRenderPass(m_device->device(), &rpci, nullptr, &rp);
    ASSERT_VK_SUCCESS(err);
    // Color target and a framebuffer compatible with the render pass above.
    VkImageObj image(m_device);
    image.Init(32, 32, 1, VK_FORMAT_R8G8B8A8_UNORM, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL, 0);
    VkImageView imageView = image.targetView(VK_FORMAT_R8G8B8A8_UNORM);
    VkFramebufferCreateInfo fbci = {VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO, nullptr, 0, rp, 1, &imageView, 32, 32, 1};
    VkFramebuffer fb;
    err = vk::CreateFramebuffer(m_device->device(), &fbci, nullptr, &fb);
    ASSERT_VK_SUCCESS(err);
    m_commandBuffer->begin();
    VkRenderPassBeginInfo rpbi = {VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO,
                                  nullptr,
                                  rp,
                                  fb,
                                  {{
                                       0,
                                       0,
                                   },
                                   {32, 32}},
                                  0,
                                  nullptr};
    // Begin with SECONDARY_COMMAND_BUFFERS contents so the barrier can be recorded in a
    // secondary buffer executed within this render pass instance.
    vk::CmdBeginRenderPass(m_commandBuffer->handle(), &rpbi, VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS);
    VkCommandPoolObj pool(m_device, m_device->graphics_queue_node_index_, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT);
    VkCommandBufferObj secondary(m_device, &pool, VK_COMMAND_BUFFER_LEVEL_SECONDARY);
    // Inheritance info supplies the render pass; the secondary continues subpass 0.
    VkCommandBufferInheritanceInfo cbii = {VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO,
                                           nullptr,
                                           rp,
                                           0,
                                           VK_NULL_HANDLE,  // Set to NULL FB handle intentionally to flesh out any errors
                                           VK_FALSE,
                                           0,
                                           0};
    VkCommandBufferBeginInfo cbbi = {VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO, nullptr,
                                     VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT | VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT,
                                     &cbii};
    vk::BeginCommandBuffer(secondary.handle(), &cbbi);
    // Memory barrier whose stages/access exactly match the subpass self-dependency above.
    VkMemoryBarrier mem_barrier = {};
    mem_barrier.sType = VK_STRUCTURE_TYPE_MEMORY_BARRIER;
    mem_barrier.pNext = NULL;
    mem_barrier.srcAccessMask = VK_ACCESS_SHADER_WRITE_BIT;
    mem_barrier.dstAccessMask = VK_ACCESS_SHADER_WRITE_BIT;
    vk::CmdPipelineBarrier(secondary.handle(), VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
                           VK_DEPENDENCY_BY_REGION_BIT, 1, &mem_barrier, 0, nullptr, 0, nullptr);
    // Image barrier on the attachment: same stages/access, no layout change
    // (COLOR_ATTACHMENT_OPTIMAL -> COLOR_ATTACHMENT_OPTIMAL per the helper's signature).
    image.ImageMemoryBarrier(&secondary, VK_IMAGE_ASPECT_COLOR_BIT, VK_ACCESS_SHADER_WRITE_BIT, VK_ACCESS_SHADER_WRITE_BIT,
                             VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
                             VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT);
    secondary.end();
    vk::CmdExecuteCommands(m_commandBuffer->handle(), 1, &secondary.handle());
    vk::CmdEndRenderPass(m_commandBuffer->handle());
    m_commandBuffer->end();
    // Submit and wait so any queue-submit-time validation runs before we verify.
    VkSubmitInfo submit_info = {};
    submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
    submit_info.commandBufferCount = 1;
    submit_info.pCommandBuffers = &m_commandBuffer->handle();
    vk::QueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE);
    vk::QueueWaitIdle(m_device->m_queue);
    vk::DestroyFramebuffer(m_device->device(), fb, nullptr);
    vk::DestroyRenderPass(m_device->device(), rp, nullptr);
    m_errorMonitor->VerifyNotFound();
}
TEST_F(VkPositiveLayerTest, RenderPassCreateAttachmentUsedTwiceOK) {
    TEST_DESCRIPTION("Attachment is used simultaneously as color and input, with the same layout. This is OK.");
    ASSERT_NO_FATAL_FAILURE(Init());
    // Single attachment kept in GENERAL layout so it may be read and written in the same subpass.
    VkAttachmentDescription attach[] = {
        {0, VK_FORMAT_R8G8B8A8_UNORM, VK_SAMPLE_COUNT_1_BIT, VK_ATTACHMENT_LOAD_OP_LOAD, VK_ATTACHMENT_STORE_OP_DONT_CARE,
         VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_IMAGE_LAYOUT_GENERAL, VK_IMAGE_LAYOUT_GENERAL},
    };
    VkAttachmentReference ref = {0, VK_IMAGE_LAYOUT_GENERAL};
    // The same reference is used both as the input attachment and the color attachment.
    VkSubpassDescription subpasses[] = {
        {0, VK_PIPELINE_BIND_POINT_GRAPHICS, 1, &ref, 1, &ref, nullptr, nullptr, 0, nullptr},
    };
    VkRenderPassCreateInfo rpci = {VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, nullptr, 0, 1, attach, 1, subpasses, 0, nullptr};
    // Initialize the handle and check the create result: previously an uninitialized handle
    // could be passed to DestroyRenderPass if creation failed.
    VkRenderPass rp = VK_NULL_HANDLE;
    m_errorMonitor->ExpectSuccess();
    VkResult err = vk::CreateRenderPass(m_device->device(), &rpci, nullptr, &rp);
    m_errorMonitor->VerifyNotFound();
    ASSERT_VK_SUCCESS(err);
    vk::DestroyRenderPass(m_device->device(), rp, nullptr);
}
TEST_F(VkPositiveLayerTest, RenderPassCreateInitialLayoutUndefined) {
    TEST_DESCRIPTION(
        "Ensure that CmdBeginRenderPass with an attachment's initialLayout of VK_IMAGE_LAYOUT_UNDEFINED works when the command "
        "buffer has prior knowledge of that attachment's layout.");
    m_errorMonitor->ExpectSuccess();
    ASSERT_NO_FATAL_FAILURE(Init());
    // A renderpass with one color attachment.
    VkAttachmentDescription attachment = {0,
                                          VK_FORMAT_R8G8B8A8_UNORM,
                                          VK_SAMPLE_COUNT_1_BIT,
                                          VK_ATTACHMENT_LOAD_OP_DONT_CARE,
                                          VK_ATTACHMENT_STORE_OP_STORE,
                                          VK_ATTACHMENT_LOAD_OP_DONT_CARE,
                                          VK_ATTACHMENT_STORE_OP_DONT_CARE,
                                          VK_IMAGE_LAYOUT_UNDEFINED,
                                          VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL};
    VkAttachmentReference att_ref = {0, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL};
    VkSubpassDescription subpass = {0, VK_PIPELINE_BIND_POINT_GRAPHICS, 0, nullptr, 1, &att_ref, nullptr, nullptr, 0, nullptr};
    VkRenderPassCreateInfo rpci = {VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, nullptr, 0, 1, &attachment, 1, &subpass, 0, nullptr};
    VkRenderPass rp;
    VkResult err = vk::CreateRenderPass(m_device->device(), &rpci, nullptr, &rp);
    ASSERT_VK_SUCCESS(err);
    // A compatible framebuffer.
    VkImageObj image(m_device);
    image.Init(32, 32, 1, VK_FORMAT_R8G8B8A8_UNORM, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL, 0);
    ASSERT_TRUE(image.initialized());
    VkImageViewCreateInfo ivci = {
        VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
        nullptr,
        0,
        image.handle(),
        VK_IMAGE_VIEW_TYPE_2D,
        VK_FORMAT_R8G8B8A8_UNORM,
        {VK_COMPONENT_SWIZZLE_IDENTITY, VK_COMPONENT_SWIZZLE_IDENTITY, VK_COMPONENT_SWIZZLE_IDENTITY,
         VK_COMPONENT_SWIZZLE_IDENTITY},
        {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1},
    };
    VkImageView view;
    err = vk::CreateImageView(m_device->device(), &ivci, nullptr, &view);
    ASSERT_VK_SUCCESS(err);
    VkFramebufferCreateInfo fci = {VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO, nullptr, 0, rp, 1, &view, 32, 32, 1};
    VkFramebuffer fb;
    err = vk::CreateFramebuffer(m_device->device(), &fci, nullptr, &fb);
    ASSERT_VK_SUCCESS(err);
    // Record a single command buffer which uses this renderpass twice. The
    // bug is triggered at the beginning of the second renderpass, when the
    // command buffer already has a layout recorded for the attachment.
    VkRenderPassBeginInfo rpbi = {VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO, nullptr, rp, fb, {{0, 0}, {32, 32}}, 0, nullptr};
    m_commandBuffer->begin();
    vk::CmdBeginRenderPass(m_commandBuffer->handle(), &rpbi, VK_SUBPASS_CONTENTS_INLINE);
    vk::CmdEndRenderPass(m_commandBuffer->handle());
    // Second begin: attachment is now tracked as COLOR_ATTACHMENT_OPTIMAL, while the render
    // pass declares initialLayout UNDEFINED — this must not generate an error.
    vk::CmdBeginRenderPass(m_commandBuffer->handle(), &rpbi, VK_SUBPASS_CONTENTS_INLINE);
    m_errorMonitor->VerifyNotFound();
    vk::CmdEndRenderPass(m_commandBuffer->handle());
    m_commandBuffer->end();
    vk::DestroyFramebuffer(m_device->device(), fb, nullptr);
    vk::DestroyRenderPass(m_device->device(), rp, nullptr);
    vk::DestroyImageView(m_device->device(), view, nullptr);
}
TEST_F(VkPositiveLayerTest, RenderPassCreateAttachmentLayoutWithLoadOpThenReadOnly) {
    TEST_DESCRIPTION(
        "Positive test where we create a renderpass with an attachment that uses LOAD_OP_CLEAR, the first subpass has a valid "
        "layout, and a second subpass then uses a valid *READ_ONLY* layout.");
    m_errorMonitor->ExpectSuccess();
    ASSERT_NO_FATAL_FAILURE(Init());
    auto depth_format = FindSupportedDepthStencilFormat(gpu());
    if (!depth_format) {
        printf("%s No Depth + Stencil format found. Skipped.\n", kSkipPrefix);
        return;
    }
    // Two references to the same attachment (index 0) in different layouts.
    VkAttachmentReference attach[2] = {};
    attach[0].attachment = 0;
    attach[0].layout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;
    attach[1].attachment = 0;
    attach[1].layout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL;
    VkSubpassDescription subpasses[2] = {};
    // First subpass clears DS attach on load
    subpasses[0].pDepthStencilAttachment = &attach[0];
    // 2nd subpass reads in DS as input attachment
    subpasses[1].inputAttachmentCount = 1;
    subpasses[1].pInputAttachments = &attach[1];
    VkAttachmentDescription attach_desc = {};
    attach_desc.format = depth_format;
    attach_desc.samples = VK_SAMPLE_COUNT_1_BIT;
    attach_desc.storeOp = VK_ATTACHMENT_STORE_OP_STORE;
    attach_desc.stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;
    attach_desc.loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR;
    attach_desc.stencilLoadOp = VK_ATTACHMENT_LOAD_OP_CLEAR;
    attach_desc.initialLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;
    attach_desc.finalLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL;
    VkRenderPassCreateInfo rpci = {};
    rpci.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO;
    rpci.attachmentCount = 1;
    rpci.pAttachments = &attach_desc;
    rpci.subpassCount = 2;
    rpci.pSubpasses = subpasses;
    // Now create RenderPass and verify no errors
    // Initialize the handle and check the create result: previously an uninitialized handle
    // could be passed to DestroyRenderPass if creation failed.
    VkRenderPass rp = VK_NULL_HANDLE;
    VkResult err = vk::CreateRenderPass(m_device->device(), &rpci, NULL, &rp);
    m_errorMonitor->VerifyNotFound();
    ASSERT_VK_SUCCESS(err);
    vk::DestroyRenderPass(m_device->device(), rp, NULL);
}
TEST_F(VkPositiveLayerTest, RenderPassBeginSubpassZeroTransitionsApplied) {
    TEST_DESCRIPTION("Ensure that CmdBeginRenderPass applies the layout transitions for the first subpass");
    m_errorMonitor->ExpectSuccess();
    ASSERT_NO_FATAL_FAILURE(Init());
    // A renderpass with one color attachment.
    VkAttachmentDescription attachment = {0,
                                          VK_FORMAT_R8G8B8A8_UNORM,
                                          VK_SAMPLE_COUNT_1_BIT,
                                          VK_ATTACHMENT_LOAD_OP_DONT_CARE,
                                          VK_ATTACHMENT_STORE_OP_STORE,
                                          VK_ATTACHMENT_LOAD_OP_DONT_CARE,
                                          VK_ATTACHMENT_STORE_OP_DONT_CARE,
                                          VK_IMAGE_LAYOUT_UNDEFINED,
                                          VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL};
    VkAttachmentReference att_ref = {0, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL};
    VkSubpassDescription subpass = {0, VK_PIPELINE_BIND_POINT_GRAPHICS, 0, nullptr, 1, &att_ref, nullptr, nullptr, 0, nullptr};
    // Self-dependency allowing a barrier inside subpass 0 with matching stages/access.
    VkSubpassDependency dep = {0,
                               0,
                               VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
                               VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
                               VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
                               VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
                               VK_DEPENDENCY_BY_REGION_BIT};
    VkRenderPassCreateInfo rpci = {VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, nullptr, 0, 1, &attachment, 1, &subpass, 1, &dep};
    VkResult err;
    VkRenderPass rp;
    err = vk::CreateRenderPass(m_device->device(), &rpci, nullptr, &rp);
    ASSERT_VK_SUCCESS(err);
    // A compatible framebuffer.
    VkImageObj image(m_device);
    image.Init(32, 32, 1, VK_FORMAT_R8G8B8A8_UNORM, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL, 0);
    ASSERT_TRUE(image.initialized());
    VkImageView view = image.targetView(VK_FORMAT_R8G8B8A8_UNORM);
    VkFramebufferCreateInfo fci = {VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO, nullptr, 0, rp, 1, &view, 32, 32, 1};
    VkFramebuffer fb;
    err = vk::CreateFramebuffer(m_device->device(), &fci, nullptr, &fb);
    ASSERT_VK_SUCCESS(err);
    // Record a single command buffer which issues a pipeline barrier w/
    // image memory barrier for the attachment. This detects the previously
    // missing tracking of the subpass layout by throwing a validation error
    // if it doesn't occur.
    VkRenderPassBeginInfo rpbi = {VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO, nullptr, rp, fb, {{0, 0}, {32, 32}}, 0, nullptr};
    m_commandBuffer->begin();
    vk::CmdBeginRenderPass(m_commandBuffer->handle(), &rpbi, VK_SUBPASS_CONTENTS_INLINE);
    // Barrier expects the attachment to already be in COLOR_ATTACHMENT_OPTIMAL, i.e. the
    // subpass-0 transition from UNDEFINED must have been applied by CmdBeginRenderPass.
    image.ImageMemoryBarrier(m_commandBuffer, VK_IMAGE_ASPECT_COLOR_BIT, VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
                             VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
                             VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT);
    vk::CmdEndRenderPass(m_commandBuffer->handle());
    m_errorMonitor->VerifyNotFound();
    m_commandBuffer->end();
    vk::DestroyFramebuffer(m_device->device(), fb, nullptr);
    vk::DestroyRenderPass(m_device->device(), rp, nullptr);
}
TEST_F(VkPositiveLayerTest, RenderPassBeginTransitionsAttachmentUnused) {
    TEST_DESCRIPTION(
        "Ensure that layout transitions work correctly without errors, when an attachment reference is VK_ATTACHMENT_UNUSED");
    ASSERT_NO_FATAL_FAILURE(Init());
    m_errorMonitor->ExpectSuccess();
    // A renderpass with no attachments
    // The subpass references VK_ATTACHMENT_UNUSED, so no layout transition should be attempted.
    VkAttachmentReference att_ref = {VK_ATTACHMENT_UNUSED, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL};
    VkSubpassDescription subpass = {0, VK_PIPELINE_BIND_POINT_GRAPHICS, 0, nullptr, 1, &att_ref, nullptr, nullptr, 0, nullptr};
    VkRenderPassCreateInfo rpci = {VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, nullptr, 0, 0, nullptr, 1, &subpass, 0, nullptr};
    VkRenderPass rp;
    VkResult err = vk::CreateRenderPass(m_device->device(), &rpci, nullptr, &rp);
    ASSERT_VK_SUCCESS(err);
    // A compatible framebuffer.
    VkFramebufferCreateInfo fci = {VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO, nullptr, 0, rp, 0, nullptr, 32, 32, 1};
    VkFramebuffer fb;
    err = vk::CreateFramebuffer(m_device->device(), &fci, nullptr, &fb);
    ASSERT_VK_SUCCESS(err);
    // Record a command buffer which just begins and ends the renderpass. The
    // bug manifests in BeginRenderPass.
    VkRenderPassBeginInfo rpbi = {VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO, nullptr, rp, fb, {{0, 0}, {32, 32}}, 0, nullptr};
    m_commandBuffer->begin();
    vk::CmdBeginRenderPass(m_commandBuffer->handle(), &rpbi, VK_SUBPASS_CONTENTS_INLINE);
    vk::CmdEndRenderPass(m_commandBuffer->handle());
    m_errorMonitor->VerifyNotFound();
    m_commandBuffer->end();
    vk::DestroyFramebuffer(m_device->device(), fb, nullptr);
    vk::DestroyRenderPass(m_device->device(), rp, nullptr);
}
TEST_F(VkPositiveLayerTest, RenderPassBeginStencilLoadOp) {
    TEST_DESCRIPTION("Create a stencil-only attachment with a LOAD_OP set to CLEAR. stencil[Load|Store]Op used to be ignored.");
    VkResult result = VK_SUCCESS;
    ASSERT_NO_FATAL_FAILURE(Init());
    auto depth_format = FindSupportedDepthStencilFormat(gpu());
    if (!depth_format) {
        printf("%s No Depth + Stencil format found. Skipped.\n", kSkipPrefix);
        return;
    }
    // Skip if the implementation can't create a 100x100 DS image with the required usage.
    VkImageFormatProperties formatProps;
    vk::GetPhysicalDeviceImageFormatProperties(gpu(), depth_format, VK_IMAGE_TYPE_2D, VK_IMAGE_TILING_OPTIMAL,
                                               VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT, 0,
                                               &formatProps);
    if (formatProps.maxExtent.width < 100 || formatProps.maxExtent.height < 100) {
        printf("%s Image format max extent is too small.\n", kSkipPrefix);
        return;
    }
    VkFormat depth_stencil_fmt = depth_format;
    m_depthStencil->Init(m_device, 100, 100, depth_stencil_fmt,
                         VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT);
    // Attachment where the depth aspect is DONT_CARE but the stencil aspect is CLEAR/STORE —
    // the ops under test.
    VkAttachmentDescription att = {};
    VkAttachmentReference ref = {};
    att.format = depth_stencil_fmt;
    att.samples = VK_SAMPLE_COUNT_1_BIT;
    att.loadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
    att.storeOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;
    att.stencilLoadOp = VK_ATTACHMENT_LOAD_OP_CLEAR;
    att.stencilStoreOp = VK_ATTACHMENT_STORE_OP_STORE;
    att.initialLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;
    att.finalLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;
    VkClearValue clear;
    clear.depthStencil.depth = 1.0;
    clear.depthStencil.stencil = 0;
    ref.attachment = 0;
    ref.layout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;
    // Depth/stencil-only subpass (no color attachments).
    VkSubpassDescription subpass = {};
    subpass.pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS;
    subpass.flags = 0;
    subpass.inputAttachmentCount = 0;
    subpass.pInputAttachments = NULL;
    subpass.colorAttachmentCount = 0;
    subpass.pColorAttachments = NULL;
    subpass.pResolveAttachments = NULL;
    subpass.pDepthStencilAttachment = &ref;
    subpass.preserveAttachmentCount = 0;
    subpass.pPreserveAttachments = NULL;
    VkRenderPass rp;
    VkRenderPassCreateInfo rp_info = {};
    rp_info.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO;
    rp_info.attachmentCount = 1;
    rp_info.pAttachments = &att;
    rp_info.subpassCount = 1;
    rp_info.pSubpasses = &subpass;
    result = vk::CreateRenderPass(device(), &rp_info, NULL, &rp);
    ASSERT_VK_SUCCESS(result);
    VkImageView *depthView = m_depthStencil->BindInfo();
    VkFramebufferCreateInfo fb_info = {};
    fb_info.sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO;
    fb_info.pNext = NULL;
    fb_info.renderPass = rp;
    fb_info.attachmentCount = 1;
    fb_info.pAttachments = depthView;
    fb_info.width = 100;
    fb_info.height = 100;
    fb_info.layers = 1;
    VkFramebuffer fb;
    result = vk::CreateFramebuffer(device(), &fb_info, NULL, &fb);
    ASSERT_VK_SUCCESS(result);
    VkRenderPassBeginInfo rpbinfo = {};
    rpbinfo.clearValueCount = 1;
    rpbinfo.pClearValues = &clear;
    rpbinfo.pNext = NULL;
    rpbinfo.renderPass = rp;
    rpbinfo.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO;
    rpbinfo.renderArea.extent.width = 100;
    rpbinfo.renderArea.extent.height = 100;
    rpbinfo.renderArea.offset.x = 0;
    rpbinfo.renderArea.offset.y = 0;
    rpbinfo.framebuffer = fb;
    VkFenceObj fence;
    fence.init(*m_device, VkFenceObj::create_info());
    ASSERT_TRUE(fence.initialized());
    // First submission: begin/end the render pass so the stencil CLEAR executes.
    m_commandBuffer->begin();
    m_commandBuffer->BeginRenderPass(rpbinfo);
    m_commandBuffer->EndRenderPass();
    m_commandBuffer->end();
    m_commandBuffer->QueueCommandBuffer(fence);
    VkImageObj destImage(m_device);
    destImage.Init(100, 100, 1, depth_stencil_fmt, VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT,
                   VK_IMAGE_TILING_OPTIMAL, 0);
    // Wait for the render-pass submission to finish before recording the copy that reads it.
    fence.wait(VK_TRUE, UINT64_MAX);
    VkCommandBufferObj cmdbuf(m_device, m_commandPool);
    cmdbuf.begin();
    // Transition source to TRANSFER_SRC and destination to TRANSFER_DST for the image copy.
    m_depthStencil->ImageMemoryBarrier(&cmdbuf, VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT,
                                       VK_ACCESS_TRANSFER_READ_BIT | VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT,
                                       VK_ACCESS_TRANSFER_WRITE_BIT | VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT,
                                       VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL);
    destImage.ImageMemoryBarrier(&cmdbuf, VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT,
                                 VK_ACCESS_TRANSFER_WRITE_BIT | VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT, 0,
                                 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);
    // Full-image copy of both depth and stencil aspects.
    VkImageCopy cregion;
    cregion.srcSubresource.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT;
    cregion.srcSubresource.mipLevel = 0;
    cregion.srcSubresource.baseArrayLayer = 0;
    cregion.srcSubresource.layerCount = 1;
    cregion.srcOffset.x = 0;
    cregion.srcOffset.y = 0;
    cregion.srcOffset.z = 0;
    cregion.dstSubresource.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT;
    cregion.dstSubresource.mipLevel = 0;
    cregion.dstSubresource.baseArrayLayer = 0;
    cregion.dstSubresource.layerCount = 1;
    cregion.dstOffset.x = 0;
    cregion.dstOffset.y = 0;
    cregion.dstOffset.z = 0;
    cregion.extent.width = 100;
    cregion.extent.height = 100;
    cregion.extent.depth = 1;
    cmdbuf.CopyImage(m_depthStencil->handle(), VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, destImage.handle(),
                     VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &cregion);
    cmdbuf.end();
    // Second submission: copy the rendered depth/stencil; only this submit is checked for
    // validation errors (ExpectSuccess/VerifyNotFound bracket the QueueSubmit).
    VkSubmitInfo submit_info;
    submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
    submit_info.pNext = NULL;
    submit_info.waitSemaphoreCount = 0;
    submit_info.pWaitSemaphores = NULL;
    submit_info.pWaitDstStageMask = NULL;
    submit_info.commandBufferCount = 1;
    submit_info.pCommandBuffers = &cmdbuf.handle();
    submit_info.signalSemaphoreCount = 0;
    submit_info.pSignalSemaphores = NULL;
    m_errorMonitor->ExpectSuccess();
    vk::QueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE);
    m_errorMonitor->VerifyNotFound();
    vk::QueueWaitIdle(m_device->m_queue);
    vk::DestroyRenderPass(m_device->device(), rp, nullptr);
    vk::DestroyFramebuffer(m_device->device(), fb, nullptr);
}
// Begin the same render pass twice in one command buffer, once with SECONDARY_COMMAND_BUFFERS
// contents and once with INLINE contents; neither (nor ending them) should produce errors.
TEST_F(VkPositiveLayerTest, RenderPassBeginInlineAndSecondaryCommandBuffers) {
    m_errorMonitor->ExpectSuccess();
    ASSERT_NO_FATAL_FAILURE(Init());
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
    m_commandBuffer->begin();
    // First instance: SECONDARY_COMMAND_BUFFERS contents; no secondaries are executed.
    vk::CmdBeginRenderPass(m_commandBuffer->handle(), &m_renderPassBeginInfo, VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS);
    vk::CmdEndRenderPass(m_commandBuffer->handle());
    m_errorMonitor->VerifyNotFound();
    // Second instance: INLINE contents in the same command buffer.
    vk::CmdBeginRenderPass(m_commandBuffer->handle(), &m_renderPassBeginInfo, VK_SUBPASS_CONTENTS_INLINE);
    m_errorMonitor->VerifyNotFound();
    vk::CmdEndRenderPass(m_commandBuffer->handle());
    m_errorMonitor->VerifyNotFound();
    m_commandBuffer->end();
    m_errorMonitor->VerifyNotFound();
}
TEST_F(VkPositiveLayerTest, RenderPassBeginDepthStencilLayoutTransitionFromUndefined) {
    TEST_DESCRIPTION(
        "Create a render pass with depth-stencil attachment where layout transition from UNDEFINED TO DS_READ_ONLY_OPTIMAL is set "
        "by render pass and verify that transition has correctly occurred at queue submit time with no validation errors.");
    ASSERT_NO_FATAL_FAILURE(Init());
    auto depth_format = FindSupportedDepthStencilFormat(gpu());
    if (!depth_format) {
        printf("%s No Depth + Stencil format found. Skipped.\n", kSkipPrefix);
        return;
    }
    // Skip if a 32x32 DS image with this usage isn't supported.
    VkImageFormatProperties format_props;
    vk::GetPhysicalDeviceImageFormatProperties(gpu(), depth_format, VK_IMAGE_TYPE_2D, VK_IMAGE_TILING_OPTIMAL,
                                               VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT, 0, &format_props);
    if (format_props.maxExtent.width < 32 || format_props.maxExtent.height < 32) {
        printf("%s Depth extent too small, RenderPassDepthStencilLayoutTransition skipped.\n", kSkipPrefix);
        return;
    }
    m_errorMonitor->ExpectSuccess();
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
    // A renderpass with one depth/stencil attachment.
    // initialLayout UNDEFINED: the render pass itself performs the layout transition.
    VkAttachmentDescription attachment = {0,
                                          depth_format,
                                          VK_SAMPLE_COUNT_1_BIT,
                                          VK_ATTACHMENT_LOAD_OP_DONT_CARE,
                                          VK_ATTACHMENT_STORE_OP_DONT_CARE,
                                          VK_ATTACHMENT_LOAD_OP_DONT_CARE,
                                          VK_ATTACHMENT_STORE_OP_DONT_CARE,
                                          VK_IMAGE_LAYOUT_UNDEFINED,
                                          VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL};
    VkAttachmentReference att_ref = {0, VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL};
    VkSubpassDescription subpass = {0, VK_PIPELINE_BIND_POINT_GRAPHICS, 0, nullptr, 0, nullptr, nullptr, &att_ref, 0, nullptr};
    VkRenderPassCreateInfo rpci = {VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, nullptr, 0, 1, &attachment, 1, &subpass, 0, nullptr};
    VkRenderPass rp;
    VkResult err = vk::CreateRenderPass(m_device->device(), &rpci, nullptr, &rp);
    ASSERT_VK_SUCCESS(err);
    // A compatible ds image.
    VkImageObj image(m_device);
    image.Init(32, 32, 1, depth_format, VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL, 0);
    ASSERT_TRUE(image.initialized());
    VkImageViewCreateInfo ivci = {
        VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
        nullptr,
        0,
        image.handle(),
        VK_IMAGE_VIEW_TYPE_2D,
        depth_format,
        {VK_COMPONENT_SWIZZLE_IDENTITY, VK_COMPONENT_SWIZZLE_IDENTITY, VK_COMPONENT_SWIZZLE_IDENTITY,
         VK_COMPONENT_SWIZZLE_IDENTITY},
        {VK_IMAGE_ASPECT_DEPTH_BIT, 0, 1, 0, 1},
    };
    VkImageView view;
    err = vk::CreateImageView(m_device->device(), &ivci, nullptr, &view);
    ASSERT_VK_SUCCESS(err);
    VkFramebufferCreateInfo fci = {VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO, nullptr, 0, rp, 1, &view, 32, 32, 1};
    VkFramebuffer fb;
    err = vk::CreateFramebuffer(m_device->device(), &fci, nullptr, &fb);
    ASSERT_VK_SUCCESS(err);
    VkRenderPassBeginInfo rpbi = {VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO, nullptr, rp, fb, {{0, 0}, {32, 32}}, 0, nullptr};
    // Submit via QueueCommandBuffer so queue-submit-time layout validation runs before verify.
    m_commandBuffer->begin();
    vk::CmdBeginRenderPass(m_commandBuffer->handle(), &rpbi, VK_SUBPASS_CONTENTS_INLINE);
    vk::CmdEndRenderPass(m_commandBuffer->handle());
    m_commandBuffer->end();
    m_commandBuffer->QueueCommandBuffer(false);
    m_errorMonitor->VerifyNotFound();
    // Cleanup
    vk::DestroyImageView(m_device->device(), view, NULL);
    vk::DestroyRenderPass(m_device->device(), rp, NULL);
    vk::DestroyFramebuffer(m_device->device(), fb, NULL);
}
TEST_F(VkPositiveLayerTest, DestroyPipelineRenderPass) {
    TEST_DESCRIPTION("Draw using a pipeline whose create renderPass has been destroyed.");
    m_errorMonitor->ExpectSuccess();
    ASSERT_NO_FATAL_FAILURE(Init());
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
    VkResult err;
    // Create a renderPass that's compatible with Draw-time renderPass
    VkAttachmentDescription att = {};
    att.format = m_render_target_fmt;
    att.samples = VK_SAMPLE_COUNT_1_BIT;
    att.loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR;
    att.storeOp = VK_ATTACHMENT_STORE_OP_STORE;
    att.stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
    att.stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;
    att.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
    att.finalLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
    VkAttachmentReference ref = {};
    ref.layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
    ref.attachment = 0;
    m_renderPassClearValues.clear();
    VkClearValue clear = {};
    clear.color = m_clear_color;
    VkSubpassDescription subpass = {};
    subpass.pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS;
    subpass.flags = 0;
    subpass.inputAttachmentCount = 0;
    subpass.pInputAttachments = NULL;
    subpass.colorAttachmentCount = 1;
    subpass.pColorAttachments = &ref;
    subpass.pResolveAttachments = NULL;
    subpass.pDepthStencilAttachment = NULL;
    subpass.preserveAttachmentCount = 0;
    subpass.pPreserveAttachments = NULL;
    VkRenderPassCreateInfo rp_info = {};
    rp_info.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO;
    rp_info.attachmentCount = 1;
    rp_info.pAttachments = &att;
    rp_info.subpassCount = 1;
    rp_info.pSubpasses = &subpass;
    VkRenderPass rp;
    err = vk::CreateRenderPass(device(), &rp_info, NULL, &rp);
    ASSERT_VK_SUCCESS(err);
    // Pipeline built against the compatible render pass `rp` (not the draw-time one).
    VkShaderObj vs(m_device, bindStateVertShaderText, VK_SHADER_STAGE_VERTEX_BIT, this);
    VkShaderObj fs(m_device, bindStateFragShaderText, VK_SHADER_STAGE_FRAGMENT_BIT, this);
    VkPipelineObj pipe(m_device);
    pipe.AddDefaultColorAttachment();
    pipe.AddShader(&vs);
    pipe.AddShader(&fs);
    VkViewport viewport = {0.0f, 0.0f, 64.0f, 64.0f, 0.0f, 1.0f};
    m_viewports.push_back(viewport);
    pipe.SetViewport(m_viewports);
    VkRect2D rect = {{0, 0}, {64, 64}};
    m_scissors.push_back(rect);
    pipe.SetScissor(m_scissors);
    const VkPipelineLayoutObj pl(m_device);
    pipe.CreateVKPipeline(pl.handle(), rp);
    // Draw with the framework's default render pass, not `rp`.
    m_commandBuffer->begin();
    m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo);
    vk::CmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.handle());
    // Destroy renderPass before pipeline is used in Draw
    // We delay until after CmdBindPipeline to verify that invalid binding isn't
    // created between CB & renderPass, which we used to do.
    vk::DestroyRenderPass(m_device->device(), rp, nullptr);
    vk::CmdDraw(m_commandBuffer->handle(), 3, 1, 0, 0);
    vk::CmdEndRenderPass(m_commandBuffer->handle());
    m_commandBuffer->end();
    VkSubmitInfo submit_info = {};
    submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
    submit_info.commandBufferCount = 1;
    submit_info.pCommandBuffers = &m_commandBuffer->handle();
    vk::QueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE);
    m_errorMonitor->VerifyNotFound();
    vk::QueueWaitIdle(m_device->m_queue);
}
TEST_F(VkPositiveLayerTest, ResetQueryPoolFromDifferentCB) {
    TEST_DESCRIPTION("Reset a query on one CB and use it in another.");
    ASSERT_NO_FATAL_FAILURE(Init());
    m_errorMonitor->ExpectSuccess();
    // Occlusion query pool with a single query slot.
    // Initialize the handle and check the create result so a failed creation can't lead to
    // using (and destroying) an uninitialized pool handle below.
    VkQueryPool query_pool = VK_NULL_HANDLE;
    VkQueryPoolCreateInfo query_pool_create_info{};
    query_pool_create_info.sType = VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO;
    query_pool_create_info.queryType = VK_QUERY_TYPE_OCCLUSION;
    query_pool_create_info.queryCount = 1;
    ASSERT_VK_SUCCESS(vk::CreateQueryPool(m_device->device(), &query_pool_create_info, nullptr, &query_pool));
    VkCommandBuffer command_buffer[2];
    VkCommandBufferAllocateInfo command_buffer_allocate_info{};
    command_buffer_allocate_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
    command_buffer_allocate_info.commandPool = m_commandPool->handle();
    command_buffer_allocate_info.commandBufferCount = 2;
    command_buffer_allocate_info.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
    ASSERT_VK_SUCCESS(vk::AllocateCommandBuffers(m_device->device(), &command_buffer_allocate_info, command_buffer));
    {
        // CB 0 resets the query; CB 1 begins/ends it. The reset happens in a different
        // command buffer than the use — the condition under test.
        VkCommandBufferBeginInfo begin_info{};
        begin_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
        vk::BeginCommandBuffer(command_buffer[0], &begin_info);
        vk::CmdResetQueryPool(command_buffer[0], query_pool, 0, 1);
        vk::EndCommandBuffer(command_buffer[0]);
        vk::BeginCommandBuffer(command_buffer[1], &begin_info);
        vk::CmdBeginQuery(command_buffer[1], query_pool, 0, 0);
        vk::CmdEndQuery(command_buffer[1], query_pool, 0);
        vk::EndCommandBuffer(command_buffer[1]);
    }
    {
        // Two batches in one QueueSubmit: reset first, then use.
        VkSubmitInfo submit_info[2]{};
        submit_info[0].sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
        submit_info[0].commandBufferCount = 1;
        submit_info[0].pCommandBuffers = &command_buffer[0];
        submit_info[0].signalSemaphoreCount = 0;
        submit_info[0].pSignalSemaphores = nullptr;
        submit_info[1].sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
        submit_info[1].commandBufferCount = 1;
        submit_info[1].pCommandBuffers = &command_buffer[1];
        submit_info[1].signalSemaphoreCount = 0;
        submit_info[1].pSignalSemaphores = nullptr;
        vk::QueueSubmit(m_device->m_queue, 2, &submit_info[0], VK_NULL_HANDLE);
    }
    vk::QueueWaitIdle(m_device->m_queue);
    vk::DestroyQueryPool(m_device->device(), query_pool, nullptr);
    vk::FreeCommandBuffers(m_device->device(), m_commandPool->handle(), 2, command_buffer);
    m_errorMonitor->VerifyNotFound();
}
TEST_F(VkPositiveLayerTest, BasicQuery) {
    TEST_DESCRIPTION("Use a couple occlusion queries");
    m_errorMonitor->ExpectSuccess();
    ASSERT_NO_FATAL_FAILURE(Init());
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
    // Host-visible buffer to receive results via CmdCopyQueryPoolResults
    // (sized for 64-bit results).
    uint32_t qfi = 0;
    VkBufferCreateInfo bci = {};
    bci.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
    bci.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
    bci.size = 4 * sizeof(uint64_t);
    bci.queueFamilyIndexCount = 1;
    bci.pQueueFamilyIndices = &qfi;
    VkBufferObj buffer;
    VkMemoryPropertyFlags mem_props = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
    buffer.init(*m_device, bci, mem_props);
    // Occlusion query pool with two query slots.
    VkQueryPool query_pool;
    VkQueryPoolCreateInfo query_pool_info;
    query_pool_info.sType = VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO;
    query_pool_info.pNext = NULL;
    query_pool_info.queryType = VK_QUERY_TYPE_OCCLUSION;
    query_pool_info.flags = 0;
    query_pool_info.queryCount = 2;
    query_pool_info.pipelineStatistics = 0;
    VkResult res = vk::CreateQueryPool(m_device->handle(), &query_pool_info, NULL, &query_pool);
    ASSERT_VK_SUCCESS(res);
    CreatePipelineHelper pipe(*this);
    pipe.InitInfo();
    pipe.InitState();
    pipe.CreateGraphicsPipeline();
    m_commandBuffer->begin();
    vk::CmdResetQueryPool(m_commandBuffer->handle(), query_pool, 0, 2);
    m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo);
    // Query 0: begun and ended with no draws in between.
    vk::CmdBeginQuery(m_commandBuffer->handle(), query_pool, 0, 0);
    vk::CmdEndQuery(m_commandBuffer->handle(), query_pool, 0);
    vk::CmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.pipeline_);
    // Query 1: covers a draw.
    // NOTE(review): query 1 is begun inside the render pass but ended after CmdEndRenderPass —
    // confirm this ordering is intended for this positive test.
    vk::CmdBeginQuery(m_commandBuffer->handle(), query_pool, 1, 0);
    vk::CmdDraw(m_commandBuffer->handle(), 3, 1, 0, 0);
    vk::CmdEndRenderPass(m_commandBuffer->handle());
    vk::CmdEndQuery(m_commandBuffer->handle(), query_pool, 1);
    // Copy both results to the buffer on the device, waiting for availability.
    vk::CmdCopyQueryPoolResults(m_commandBuffer->handle(), query_pool, 0, 2, buffer.handle(), 0, sizeof(uint64_t),
                                VK_QUERY_RESULT_64_BIT | VK_QUERY_RESULT_WAIT_BIT);
    m_commandBuffer->end();
    VkSubmitInfo submit_info = {};
    submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
    submit_info.commandBufferCount = 1;
    submit_info.pCommandBuffers = &m_commandBuffer->handle();
    vk::QueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE);
    vk::QueueWaitIdle(m_device->m_queue);
    // Also fetch results on the host with GetQueryPoolResults, waiting for both queries.
    uint64_t samples_passed[4];
    res = vk::GetQueryPoolResults(m_device->handle(), query_pool, 0, 2, sizeof(samples_passed), samples_passed, sizeof(uint64_t),
                                  VK_QUERY_RESULT_64_BIT | VK_QUERY_RESULT_WAIT_BIT);
    ASSERT_VK_SUCCESS(res);
    m_errorMonitor->VerifyNotFound();
    vk::DestroyQueryPool(m_device->handle(), query_pool, NULL);
}
TEST_F(VkPositiveLayerTest, MultiplaneGetImageSubresourceLayout) {
    TEST_DESCRIPTION("Positive test, query layout of a single plane of a multiplane image. (repro Github #2530)");

    // Enable KHR multiplane req'd extensions.
    // BUG FIX: the second argument used to be VK_KHR_GET_MEMORY_REQUIREMENTS_2_SPEC_VERSION -- the spec-version
    // constant of an *unrelated* extension -- passed as the required version of
    // VK_KHR_get_physical_device_properties2. Use the default minimum version instead.
    bool mp_extensions = InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
    if (mp_extensions) {
        m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
    }
    ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor));
    // All four device extensions below are prerequisites for VK_KHR_sampler_ycbcr_conversion (multi-planar formats).
    mp_extensions = mp_extensions && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_MAINTENANCE1_EXTENSION_NAME);
    mp_extensions = mp_extensions && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME);
    mp_extensions = mp_extensions && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_BIND_MEMORY_2_EXTENSION_NAME);
    mp_extensions = mp_extensions && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_SAMPLER_YCBCR_CONVERSION_EXTENSION_NAME);
    if (mp_extensions) {
        m_device_extension_names.push_back(VK_KHR_MAINTENANCE1_EXTENSION_NAME);
        m_device_extension_names.push_back(VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME);
        m_device_extension_names.push_back(VK_KHR_BIND_MEMORY_2_EXTENSION_NAME);
        m_device_extension_names.push_back(VK_KHR_SAMPLER_YCBCR_CONVERSION_EXTENSION_NAME);
    } else {
        printf("%s test requires KHR multiplane extensions, not available. Skipping.\n", kSkipPrefix);
        return;
    }
    ASSERT_NO_FATAL_FAILURE(InitState());

    // 3-plane 4:2:0 image; linear tiling so per-plane subresource layouts are queryable.
    VkImageCreateInfo ci = {};
    ci.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
    ci.pNext = NULL;
    ci.flags = 0;
    ci.imageType = VK_IMAGE_TYPE_2D;
    ci.format = VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM_KHR;
    ci.extent = {128, 128, 1};
    ci.mipLevels = 1;
    ci.arrayLayers = 1;
    ci.samples = VK_SAMPLE_COUNT_1_BIT;
    ci.tiling = VK_IMAGE_TILING_LINEAR;
    ci.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
    ci.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
    ci.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;

    // Verify format support before trying to create the image.
    bool supported = ImageFormatAndFeaturesSupported(instance(), gpu(), ci, VK_FORMAT_FEATURE_TRANSFER_SRC_BIT);
    if (!supported) {
        printf("%s Multiplane image format not supported.  Skipping test.\n", kSkipPrefix);
        return;  // Assume there's low ROI on searching for different mp formats
    }

    VkImage image;
    VkResult err = vk::CreateImage(device(), &ci, NULL, &image);
    ASSERT_VK_SUCCESS(err);

    // Query layout of 3rd plane -- must succeed without validation errors (repro of Github #2530).
    VkImageSubresource subres = {};
    subres.aspectMask = VK_IMAGE_ASPECT_PLANE_2_BIT_KHR;
    subres.mipLevel = 0;
    subres.arrayLayer = 0;
    VkSubresourceLayout layout = {};

    m_errorMonitor->ExpectSuccess();
    vk::GetImageSubresourceLayout(device(), image, &subres, &layout);
    m_errorMonitor->VerifyNotFound();

    vk::DestroyImage(device(), image, NULL);
}
TEST_F(VkPositiveLayerTest, OwnershipTranfersImage) {
    TEST_DESCRIPTION("Valid image ownership transfers that shouldn't create errors");
    ASSERT_NO_FATAL_FAILURE(Init(nullptr, nullptr, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT));

    // A queue-family ownership transfer needs two distinct families; require one without graphics.
    uint32_t no_gfx = m_device->QueueFamilyWithoutCapabilities(VK_QUEUE_GRAPHICS_BIT);
    if (no_gfx == UINT32_MAX) {
        printf("%s Required queue families not present (non-graphics capable required).\n", kSkipPrefix);
        return;
    }
    VkQueueObj *no_gfx_queue = m_device->queue_family_queues(no_gfx)[0].get();

    // Separate pool + command buffer for the non-graphics family (command buffers are tied to a family).
    VkCommandPoolObj no_gfx_pool(m_device, no_gfx, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT);
    VkCommandBufferObj no_gfx_cb(m_device, &no_gfx_pool, VK_COMMAND_BUFFER_LEVEL_PRIMARY, no_gfx_queue);

    // Create an "exclusive" image owned by the graphics queue.
    VkImageObj image(m_device);
    VkFlags image_use = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT;
    image.Init(32, 32, 1, VK_FORMAT_B8G8R8A8_UNORM, image_use, VK_IMAGE_TILING_OPTIMAL, 0);
    ASSERT_TRUE(image.initialized());
    auto image_subres = image.subresource_range(VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1);
    // Zero access masks and identical old/new layout: a pure ownership-transfer barrier, graphics -> no_gfx.
    auto image_barrier = image.image_memory_barrier(0, 0, image.Layout(), image.Layout(), image_subres);
    image_barrier.srcQueueFamilyIndex = m_device->graphics_queue_node_index_;
    image_barrier.dstQueueFamilyIndex = no_gfx;

    ValidOwnershipTransfer(m_errorMonitor, m_commandBuffer, &no_gfx_cb, VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT,
                           VK_PIPELINE_STAGE_TRANSFER_BIT, nullptr, &image_barrier);

    // Change layouts while changing ownership (no_gfx -> graphics this time).
    image_barrier.srcQueueFamilyIndex = no_gfx;
    image_barrier.dstQueueFamilyIndex = m_device->graphics_queue_node_index_;
    image_barrier.oldLayout = image.Layout();
    // Make sure the new layout is different from the old
    if (image_barrier.oldLayout == VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL) {
        image_barrier.newLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
    } else {
        image_barrier.newLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
    }

    ValidOwnershipTransfer(m_errorMonitor, &no_gfx_cb, m_commandBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT,
                           VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT, nullptr, &image_barrier);
}
TEST_F(VkPositiveLayerTest, OwnershipTranfersBuffer) {
    TEST_DESCRIPTION("Valid buffer ownership transfers that shouldn't create errors");
    ASSERT_NO_FATAL_FAILURE(Init(nullptr, nullptr, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT));

    // A queue-family ownership transfer needs two distinct families; require one without graphics.
    uint32_t no_gfx = m_device->QueueFamilyWithoutCapabilities(VK_QUEUE_GRAPHICS_BIT);
    if (no_gfx == UINT32_MAX) {
        printf("%s Required queue families not present (non-graphics capable required).\n", kSkipPrefix);
        return;
    }
    VkQueueObj *no_gfx_queue = m_device->queue_family_queues(no_gfx)[0].get();

    // Separate pool + command buffer for the non-graphics family.
    VkCommandPoolObj no_gfx_pool(m_device, no_gfx, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT);
    VkCommandBufferObj no_gfx_cb(m_device, &no_gfx_pool, VK_COMMAND_BUFFER_LEVEL_PRIMARY, no_gfx_queue);

    // Create a buffer.  {0xFF} sets byte 0; remaining bytes are value-initialized to zero.
    const VkDeviceSize buffer_size = 256;
    uint8_t data[buffer_size] = {0xFF};
    VkConstantBufferObj buffer(m_device, buffer_size, data, VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT);
    ASSERT_TRUE(buffer.initialized());
    // Zero access masks over the whole buffer: a pure ownership-transfer barrier.
    auto buffer_barrier = buffer.buffer_memory_barrier(0, 0, 0, VK_WHOLE_SIZE);

    // Let gfx own it.  (src == dst family here, so this is a plain barrier, not an ownership transfer.)
    buffer_barrier.srcQueueFamilyIndex = m_device->graphics_queue_node_index_;
    buffer_barrier.dstQueueFamilyIndex = m_device->graphics_queue_node_index_;
    ValidOwnershipTransferOp(m_errorMonitor, m_commandBuffer, VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT,
                             &buffer_barrier, nullptr);

    // Transfer it to non-gfx: release on the graphics CB, acquire on the non-graphics CB.
    buffer_barrier.dstQueueFamilyIndex = no_gfx;
    ValidOwnershipTransfer(m_errorMonitor, m_commandBuffer, &no_gfx_cb, VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT,
                           VK_PIPELINE_STAGE_TRANSFER_BIT, &buffer_barrier, nullptr);

    // Transfer it to gfx: reverse direction, back to the graphics family.
    buffer_barrier.srcQueueFamilyIndex = no_gfx;
    buffer_barrier.dstQueueFamilyIndex = m_device->graphics_queue_node_index_;
    ValidOwnershipTransfer(m_errorMonitor, &no_gfx_cb, m_commandBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT,
                           VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT, &buffer_barrier, nullptr);
}
TEST_F(VkPositiveLayerTest, LayoutFromPresentWithoutAccessMemoryRead) {
    // Transition an image away from PRESENT_SRC_KHR without ACCESS_MEMORY_READ
    // in srcAccessMask.
    // The required behavior here was a bit unclear in earlier versions of the
    // spec, but there is no memory dependency required here, so this should
    // work without warnings.
    m_errorMonitor->ExpectSuccess();
    ASSERT_NO_FATAL_FAILURE(Init());

    VkImageObj image(m_device);
    image.Init(128, 128, 1, VK_FORMAT_B8G8R8A8_UNORM, (VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT),
               VK_IMAGE_TILING_OPTIMAL, 0);
    ASSERT_TRUE(image.initialized());

    // Full-image color subresource range: one mip, one layer.
    const VkImageSubresourceRange full_range = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1};

    // First transition: COLOR_ATTACHMENT_OPTIMAL -> PRESENT_SRC_KHR.
    VkImageMemoryBarrier img_barrier = {};
    img_barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
    img_barrier.oldLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
    img_barrier.newLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;
    img_barrier.srcAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
    img_barrier.dstAccessMask = 0;
    img_barrier.image = image.handle();
    img_barrier.subresourceRange = full_range;

    VkCommandBufferObj cb(m_device, m_commandPool);
    cb.begin();
    cb.PipelineBarrier(VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, 0, nullptr, 0, nullptr, 1,
                       &img_barrier);

    // Second transition leaves PRESENT_SRC_KHR with srcAccessMask == 0 (no MEMORY_READ) --
    // this is the case under test and must not produce a warning.
    img_barrier.oldLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;
    img_barrier.newLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
    img_barrier.srcAccessMask = 0;
    img_barrier.dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
    cb.PipelineBarrier(VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, 0, nullptr, 0, nullptr, 1,
                       &img_barrier);
    m_errorMonitor->VerifyNotFound();
}
TEST_F(VkPositiveLayerTest, CopyNonupdatedDescriptors) {
    TEST_DESCRIPTION("Copy non-updated descriptors");
    ASSERT_NO_FATAL_FAILURE(Init());

    // Source set has one extra (sampler) binding; only the first copy_size bindings are copied.
    OneOffDescriptorSet src_descriptor_set(m_device, {
                                                         {0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1, VK_SHADER_STAGE_ALL, nullptr},
                                                         {1, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, 1, VK_SHADER_STAGE_ALL, nullptr},
                                                         {2, VK_DESCRIPTOR_TYPE_SAMPLER, 1, VK_SHADER_STAGE_ALL, nullptr},
                                                     });
    OneOffDescriptorSet dst_descriptor_set(m_device, {
                                                         {0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1, VK_SHADER_STAGE_ALL, nullptr},
                                                         {1, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, 1, VK_SHADER_STAGE_ALL, nullptr},
                                                     });

    m_errorMonitor->ExpectSuccess();
    const unsigned int copy_size = 2;
    VkCopyDescriptorSet copy_ds_update[copy_size];
    memset(copy_ds_update, 0, sizeof(copy_ds_update));
    // IDIOM FIX: loop index declared in the for-statement instead of a function-scope `unsigned int i;`.
    for (unsigned int i = 0; i < copy_size; i++) {
        copy_ds_update[i].sType = VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET;
        copy_ds_update[i].srcSet = src_descriptor_set.set_;
        copy_ds_update[i].srcBinding = i;
        copy_ds_update[i].dstSet = dst_descriptor_set.set_;
        copy_ds_update[i].dstBinding = i;
        copy_ds_update[i].descriptorCount = 1;
    }

    // The source set was never written; copying "non-updated" descriptors must not generate errors.
    vk::UpdateDescriptorSets(m_device->device(), 0, NULL, copy_size, copy_ds_update);
    m_errorMonitor->VerifyNotFound();
}
TEST_F(VkPositiveLayerTest, ConfirmNoVLErrorWhenVkCmdClearAttachmentsCalledInSecondaryCB) {
    TEST_DESCRIPTION(
        "This test is to verify that when vkCmdClearAttachments is called by a secondary commandbuffer, the validation layers do "
        "not throw an error if the primary commandbuffer begins a renderpass before executing the secondary commandbuffer.");
    ASSERT_NO_FATAL_FAILURE(Init());
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    VkCommandBufferObj secondary(m_device, m_commandPool, VK_COMMAND_BUFFER_LEVEL_SECONDARY);
    // The secondary CB records inside a render pass (RENDER_PASS_CONTINUE), so inheritance
    // info must name the render pass / subpass / framebuffer it will execute within.
    VkCommandBufferBeginInfo info = {};
    VkCommandBufferInheritanceInfo hinfo = {};
    info.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT | VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT;
    info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
    info.pInheritanceInfo = &hinfo;
    hinfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO;
    hinfo.pNext = NULL;
    hinfo.renderPass = renderPass();
    hinfo.subpass = 0;
    hinfo.framebuffer = m_framebuffer;
    hinfo.occlusionQueryEnable = VK_FALSE;
    hinfo.queryFlags = 0;
    hinfo.pipelineStatistics = 0;

    secondary.begin(&info);
    // Record a clear of color attachment 0 (to black) over the full render area.
    VkClearAttachment color_attachment;
    color_attachment.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
    color_attachment.clearValue.color.float32[0] = 0.0;
    color_attachment.clearValue.color.float32[1] = 0.0;
    color_attachment.clearValue.color.float32[2] = 0.0;
    color_attachment.clearValue.color.float32[3] = 0.0;
    color_attachment.colorAttachment = 0;
    VkClearRect clear_rect = {{{0, 0}, {(uint32_t)m_width, (uint32_t)m_height}}, 0, 1};
    vk::CmdClearAttachments(secondary.handle(), 1, &color_attachment, 1, &clear_rect);
    secondary.end();

    // Modify clear rect here to verify that it doesn't cause validation error -- the clear was
    // already recorded; this local must no longer be referenced after secondary.end().
    clear_rect = {{{0, 0}, {99999999, 99999999}}, 0, 0};

    // Execute the secondary inside a render pass begun by the primary; no errors expected.
    m_commandBuffer->begin();
    vk::CmdBeginRenderPass(m_commandBuffer->handle(), &m_renderPassBeginInfo, VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS);
    vk::CmdExecuteCommands(m_commandBuffer->handle(), 1, &secondary.handle());
    vk::CmdEndRenderPass(m_commandBuffer->handle());
    m_commandBuffer->end();
    m_errorMonitor->VerifyNotFound();
}
TEST_F(VkPositiveLayerTest, CreatePipelineComplexTypes) {
    TEST_DESCRIPTION("Smoke test for complex types across VS/FS boundary");
    ASSERT_NO_FATAL_FAILURE(Init());
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    // The pipeline below includes tessellation stages; bail out early if unsupported.
    if (!m_device->phy().features().tessellationShader) {
        printf("%s Device does not support tessellation shaders; skipped.\n", kSkipPrefix);
        return;
    }

    m_errorMonitor->ExpectSuccess();

    // Full VS -> TCS -> TES -> FS stage chain.
    VkShaderObj vert_shader(m_device, bindStateVertShaderText, VK_SHADER_STAGE_VERTEX_BIT, this);
    VkShaderObj tesc_shader(m_device, bindStateTscShaderText, VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT, this);
    VkShaderObj tese_shader(m_device, bindStateTeshaderText, VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT, this);
    VkShaderObj frag_shader(m_device, bindStateFragShaderText, VK_SHADER_STAGE_FRAGMENT_BIT, this);

    // Patch-list topology with 3 control points per patch, as tessellation requires.
    VkPipelineInputAssemblyStateCreateInfo ia_state{VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO, nullptr, 0,
                                                    VK_PRIMITIVE_TOPOLOGY_PATCH_LIST, VK_FALSE};
    VkPipelineTessellationStateCreateInfo tess_state{VK_STRUCTURE_TYPE_PIPELINE_TESSELLATION_STATE_CREATE_INFO, nullptr, 0, 3};

    CreatePipelineHelper helper(*this);
    helper.InitInfo();
    helper.gp_ci_.pTessellationState = &tess_state;
    helper.gp_ci_.pInputAssemblyState = &ia_state;
    helper.shader_stages_ = {vert_shader.GetStageCreateInfo(), tesc_shader.GetStageCreateInfo(),
                             tese_shader.GetStageCreateInfo(), frag_shader.GetStageCreateInfo()};
    helper.InitState();
    helper.CreateGraphicsPipeline();
    m_errorMonitor->VerifyNotFound();
}
TEST_F(VkPositiveLayerTest, ShaderRelaxedBlockLayout) {
    // This is a positive test, no errors expected
    // Verifies the ability to relax block layout rules with a shader that requires them to be relaxed
    TEST_DESCRIPTION("Create a shader that requires relaxed block layout.");
    ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor));

    // The Relaxed Block Layout extension was promoted to core in 1.1.
    // Go ahead and check for it and turn it on in case a 1.0 device has it.
    if (!DeviceExtensionSupported(gpu(), nullptr, VK_KHR_RELAXED_BLOCK_LAYOUT_EXTENSION_NAME)) {
        printf("%s Extension %s not supported, skipping this pass. \n", kSkipPrefix, VK_KHR_RELAXED_BLOCK_LAYOUT_EXTENSION_NAME);
        return;
    }
    m_device_extension_names.push_back(VK_KHR_RELAXED_BLOCK_LAYOUT_EXTENSION_NAME);
    ASSERT_NO_FATAL_FAILURE(InitState());
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    // Vertex shader requiring relaxed layout: member 1 is a vec3 at offset 4,
    // which standard (std140) layout forbids but relaxed layout permits.
    // Without relaxed layout, we would expect a message like:
    // "Structure id 2 decorated as Block for variable in Uniform storage class
    // must follow standard uniform buffer layout rules: member 1 at offset 4 is not aligned to 16"
    const std::string spv_source = R"(
OpCapability Shader
OpMemoryModel Logical GLSL450
OpEntryPoint Vertex %main "main"
OpSource GLSL 450
OpMemberDecorate %S 0 Offset 0
OpMemberDecorate %S 1 Offset 4
OpDecorate %S Block
OpDecorate %B DescriptorSet 0
OpDecorate %B Binding 0
%void = OpTypeVoid
%3 = OpTypeFunction %void
%float = OpTypeFloat 32
%v3float = OpTypeVector %float 3
%S = OpTypeStruct %float %v3float
%_ptr_Uniform_S = OpTypePointer Uniform %S
%B = OpVariable %_ptr_Uniform_S Uniform
%main = OpFunction %void None %3
%5 = OpLabel
OpReturn
OpFunctionEnd
)";
    // Building the VkShaderObj runs shader-module validation; with the extension
    // enabled, the relaxed offsets must not produce an error.
    m_errorMonitor->ExpectSuccess();
    VkShaderObj vs(m_device, spv_source, VK_SHADER_STAGE_VERTEX_BIT, this);
    m_errorMonitor->VerifyNotFound();
}
TEST_F(VkPositiveLayerTest, ShaderUboStd430Layout) {
    // This is a positive test, no errors expected
    // Verifies the ability to use std430 layout in a UBO when the feature is enabled
    TEST_DESCRIPTION("Create a shader that requires UBO std430 layout.");
    // Enable req'd extensions
    if (!InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) {
        printf("%s Extension %s not supported, skipping this pass. \n", kSkipPrefix,
               VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
        return;
    }
    m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
    ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor));

    // Check for the UBO standard block layout extension and turn it on if it's available
    if (!DeviceExtensionSupported(gpu(), nullptr, VK_KHR_UNIFORM_BUFFER_STANDARD_LAYOUT_EXTENSION_NAME)) {
        printf("%s Extension %s not supported, skipping this pass. \n", kSkipPrefix,
               VK_KHR_UNIFORM_BUFFER_STANDARD_LAYOUT_EXTENSION_NAME);
        return;
    }
    m_device_extension_names.push_back(VK_KHR_UNIFORM_BUFFER_STANDARD_LAYOUT_EXTENSION_NAME);

    PFN_vkGetPhysicalDeviceFeatures2 vkGetPhysicalDeviceFeatures2 =
        (PFN_vkGetPhysicalDeviceFeatures2)vk::GetInstanceProcAddr(instance(), "vkGetPhysicalDeviceFeatures2KHR");
    ASSERT_TRUE(vkGetPhysicalDeviceFeatures2 != nullptr);  // provided by VK_KHR_get_physical_device_properties2

    // Query the device's actual support for the feature before enabling it.
    auto uniform_buffer_standard_layout_features = lvl_init_struct<VkPhysicalDeviceUniformBufferStandardLayoutFeaturesKHR>(NULL);
    auto query_features2 = lvl_init_struct<VkPhysicalDeviceFeatures2>(&uniform_buffer_standard_layout_features);
    vkGetPhysicalDeviceFeatures2(gpu(), &query_features2);

    // BUG FIX: the feature was previously force-set to VK_TRUE and never re-checked after the
    // query; on a device exposing the extension but not the feature this positive test would
    // enable a false feature and the std430 shader below would produce validation errors.
    if (uniform_buffer_standard_layout_features.uniformBufferStandardLayout != VK_TRUE) {
        printf("%s uniformBufferStandardLayout feature not supported, skipping this pass. \n", kSkipPrefix);
        return;
    }

    auto set_features2 = lvl_init_struct<VkPhysicalDeviceFeatures2>(&uniform_buffer_standard_layout_features);
    ASSERT_NO_FATAL_FAILURE(InitState(nullptr, &set_features2));
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    // Vertex shader requiring std430 in a uniform buffer: a float array with stride 4,
    // which standard uniform layout forbids (arrays must be 16-byte aligned).
    // Without uniform buffer standard layout, we would expect a message like:
    // "Structure id 3 decorated as Block for variable in Uniform storage class
    // must follow standard uniform buffer layout rules: member 0 is an array
    // with stride 4 not satisfying alignment to 16"
    const std::string spv_source = R"(
OpCapability Shader
OpMemoryModel Logical GLSL450
OpEntryPoint Vertex %main "main"
OpSource GLSL 460
OpDecorate %_arr_float_uint_8 ArrayStride 4
OpMemberDecorate %foo 0 Offset 0
OpDecorate %foo Block
OpDecorate %b DescriptorSet 0
OpDecorate %b Binding 0
%void = OpTypeVoid
%3 = OpTypeFunction %void
%float = OpTypeFloat 32
%uint = OpTypeInt 32 0
%uint_8 = OpConstant %uint 8
%_arr_float_uint_8 = OpTypeArray %float %uint_8
%foo = OpTypeStruct %_arr_float_uint_8
%_ptr_Uniform_foo = OpTypePointer Uniform %foo
%b = OpVariable %_ptr_Uniform_foo Uniform
%main = OpFunction %void None %3
%5 = OpLabel
OpReturn
OpFunctionEnd
)";

    // Assemble the SPIR-V and create the module directly; creation must pass validation cleanly.
    std::vector<unsigned int> spv;
    VkShaderModuleCreateInfo module_create_info;
    VkShaderModule shader_module;
    module_create_info.sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO;
    module_create_info.pNext = NULL;
    ASMtoSPV(SPV_ENV_VULKAN_1_0, 0, spv_source.data(), spv);
    module_create_info.pCode = spv.data();
    module_create_info.codeSize = spv.size() * sizeof(unsigned int);
    module_create_info.flags = 0;

    m_errorMonitor->ExpectSuccess();
    VkResult err = vk::CreateShaderModule(m_device->handle(), &module_create_info, NULL, &shader_module);
    m_errorMonitor->VerifyNotFound();
    if (err == VK_SUCCESS) {
        vk::DestroyShaderModule(m_device->handle(), shader_module, NULL);
    }
}
TEST_F(VkPositiveLayerTest, ShaderScalarBlockLayout) {
    // This is a positive test, no errors expected
    // Verifies the ability to use scalar block layout with a shader that requires it
    TEST_DESCRIPTION("Create a shader that requires scalar block layout.");
    // Enable req'd extensions
    if (!InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) {
        printf("%s Extension %s not supported, skipping this pass. \n", kSkipPrefix,
               VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
        return;
    }
    m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
    ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor));

    // Check for the Scalar Block Layout extension and turn it on if it's available
    if (!DeviceExtensionSupported(gpu(), nullptr, VK_EXT_SCALAR_BLOCK_LAYOUT_EXTENSION_NAME)) {
        printf("%s Extension %s not supported, skipping this pass. \n", kSkipPrefix, VK_EXT_SCALAR_BLOCK_LAYOUT_EXTENSION_NAME);
        return;
    }
    m_device_extension_names.push_back(VK_EXT_SCALAR_BLOCK_LAYOUT_EXTENSION_NAME);

    PFN_vkGetPhysicalDeviceFeatures2 vkGetPhysicalDeviceFeatures2 =
        (PFN_vkGetPhysicalDeviceFeatures2)vk::GetInstanceProcAddr(instance(), "vkGetPhysicalDeviceFeatures2KHR");
    ASSERT_TRUE(vkGetPhysicalDeviceFeatures2 != nullptr);  // provided by VK_KHR_get_physical_device_properties2

    // Query the device's actual support for the feature before enabling it.
    auto scalar_block_features = lvl_init_struct<VkPhysicalDeviceScalarBlockLayoutFeaturesEXT>(NULL);
    auto query_features2 = lvl_init_struct<VkPhysicalDeviceFeatures2>(&scalar_block_features);
    vkGetPhysicalDeviceFeatures2(gpu(), &query_features2);

    // BUG FIX: the feature was previously force-set to VK_TRUE and never re-checked after the
    // query; on a device exposing the extension but not the feature this positive test would
    // enable a false feature and the scalar-layout shader below would produce validation errors.
    if (scalar_block_features.scalarBlockLayout != VK_TRUE) {
        printf("%s scalarBlockLayout feature not supported, skipping this pass. \n", kSkipPrefix);
        return;
    }

    auto set_features2 = lvl_init_struct<VkPhysicalDeviceFeatures2>(&scalar_block_features);
    ASSERT_NO_FATAL_FAILURE(InitState(nullptr, &set_features2));
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    // Vertex shader requiring scalar layout: members at offsets 4 and 8 violate std140 alignment.
    // Without scalar layout, we would expect a message like:
    // "Structure id 2 decorated as Block for variable in Uniform storage class
    // must follow standard uniform buffer layout rules: member 1 at offset 4 is not aligned to 16"
    const std::string spv_source = R"(
OpCapability Shader
OpMemoryModel Logical GLSL450
OpEntryPoint Vertex %main "main"
OpSource GLSL 450
OpMemberDecorate %S 0 Offset 0
OpMemberDecorate %S 1 Offset 4
OpMemberDecorate %S 2 Offset 8
OpDecorate %S Block
OpDecorate %B DescriptorSet 0
OpDecorate %B Binding 0
%void = OpTypeVoid
%3 = OpTypeFunction %void
%float = OpTypeFloat 32
%v3float = OpTypeVector %float 3
%S = OpTypeStruct %float %float %v3float
%_ptr_Uniform_S = OpTypePointer Uniform %S
%B = OpVariable %_ptr_Uniform_S Uniform
%main = OpFunction %void None %3
%5 = OpLabel
OpReturn
OpFunctionEnd
)";

    // Building the VkShaderObj runs shader-module validation; must succeed without errors.
    m_errorMonitor->ExpectSuccess();
    VkShaderObj vs(m_device, spv_source, VK_SHADER_STAGE_VERTEX_BIT, this);
    m_errorMonitor->VerifyNotFound();
}
TEST_F(VkPositiveLayerTest, SpirvGroupDecorations) {
    TEST_DESCRIPTION("Test shader validation support for group decorations.");
    ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor));
    ASSERT_NO_FATAL_FAILURE(InitState());
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    // Compute shader using OpDecorationGroup / OpGroupDecorate / OpGroupMemberDecorate to
    // distribute BufferBlock, Offset, DescriptorSet and Binding decorations over six SSBOs.
    const std::string spv_source = R"(
OpCapability Shader
OpMemoryModel Logical GLSL450
OpEntryPoint GLCompute %main "main" %gl_GlobalInvocationID
OpExecutionMode %main LocalSize 1 1 1
OpSource GLSL 430
OpName %main "main"
OpName %gl_GlobalInvocationID "gl_GlobalInvocationID"
OpDecorate %gl_GlobalInvocationID BuiltIn GlobalInvocationId
OpDecorate %_runtimearr_float ArrayStride 4
OpDecorate %4 BufferBlock
OpDecorate %5 Offset 0
%4 = OpDecorationGroup
%5 = OpDecorationGroup
OpGroupDecorate %4 %_struct_6 %_struct_7 %_struct_8 %_struct_9 %_struct_10 %_struct_11
OpGroupMemberDecorate %5 %_struct_6 0 %_struct_7 0 %_struct_8 0 %_struct_9 0 %_struct_10 0 %_struct_11 0
OpDecorate %12 DescriptorSet 0
OpDecorate %13 DescriptorSet 0
OpDecorate %13 NonWritable
OpDecorate %13 Restrict
%14 = OpDecorationGroup
%12 = OpDecorationGroup
%13 = OpDecorationGroup
OpGroupDecorate %12 %15
OpGroupDecorate %12 %15
OpGroupDecorate %12 %15
OpDecorate %15 DescriptorSet 0
OpDecorate %15 Binding 5
OpGroupDecorate %14 %16
OpDecorate %16 DescriptorSet 0
OpDecorate %16 Binding 0
OpGroupDecorate %12 %17
OpDecorate %17 Binding 1
OpGroupDecorate %13 %18 %19
OpDecorate %18 Binding 2
OpDecorate %19 Binding 3
OpGroupDecorate %14 %20
OpGroupDecorate %12 %20
OpGroupDecorate %13 %20
OpDecorate %20 Binding 4
%bool = OpTypeBool
%void = OpTypeVoid
%23 = OpTypeFunction %void
%uint = OpTypeInt 32 0
%int = OpTypeInt 32 1
%float = OpTypeFloat 32
%v3uint = OpTypeVector %uint 3
%v3float = OpTypeVector %float 3
%_ptr_Input_v3uint = OpTypePointer Input %v3uint
%_ptr_Uniform_int = OpTypePointer Uniform %int
%_ptr_Uniform_float = OpTypePointer Uniform %float
%_runtimearr_int = OpTypeRuntimeArray %int
%_runtimearr_float = OpTypeRuntimeArray %float
%gl_GlobalInvocationID = OpVariable %_ptr_Input_v3uint Input
%int_0 = OpConstant %int 0
%_struct_6 = OpTypeStruct %_runtimearr_float
%_ptr_Uniform__struct_6 = OpTypePointer Uniform %_struct_6
%15 = OpVariable %_ptr_Uniform__struct_6 Uniform
%_struct_7 = OpTypeStruct %_runtimearr_float
%_ptr_Uniform__struct_7 = OpTypePointer Uniform %_struct_7
%16 = OpVariable %_ptr_Uniform__struct_7 Uniform
%_struct_8 = OpTypeStruct %_runtimearr_float
%_ptr_Uniform__struct_8 = OpTypePointer Uniform %_struct_8
%17 = OpVariable %_ptr_Uniform__struct_8 Uniform
%_struct_9 = OpTypeStruct %_runtimearr_float
%_ptr_Uniform__struct_9 = OpTypePointer Uniform %_struct_9
%18 = OpVariable %_ptr_Uniform__struct_9 Uniform
%_struct_10 = OpTypeStruct %_runtimearr_float
%_ptr_Uniform__struct_10 = OpTypePointer Uniform %_struct_10
%19 = OpVariable %_ptr_Uniform__struct_10 Uniform
%_struct_11 = OpTypeStruct %_runtimearr_float
%_ptr_Uniform__struct_11 = OpTypePointer Uniform %_struct_11
%20 = OpVariable %_ptr_Uniform__struct_11 Uniform
%main = OpFunction %void None %23
%40 = OpLabel
%41 = OpLoad %v3uint %gl_GlobalInvocationID
%42 = OpCompositeExtract %uint %41 0
%43 = OpAccessChain %_ptr_Uniform_float %16 %int_0 %42
%44 = OpAccessChain %_ptr_Uniform_float %17 %int_0 %42
%45 = OpAccessChain %_ptr_Uniform_float %18 %int_0 %42
%46 = OpAccessChain %_ptr_Uniform_float %19 %int_0 %42
%47 = OpAccessChain %_ptr_Uniform_float %20 %int_0 %42
%48 = OpAccessChain %_ptr_Uniform_float %15 %int_0 %42
%49 = OpLoad %float %43
%50 = OpLoad %float %44
%51 = OpLoad %float %45
%52 = OpLoad %float %46
%53 = OpLoad %float %47
%54 = OpFAdd %float %49 %50
%55 = OpFAdd %float %54 %51
%56 = OpFAdd %float %55 %52
%57 = OpFAdd %float %56 %53
OpStore %48 %57
OpReturn
OpFunctionEnd
)";

    // CreateDescriptorSetLayout: six storage buffers, bindings 0..5, matching the shader.
    VkDescriptorSetLayoutBinding dslb[6] = {};
    size_t dslb_size = size(dslb);
    for (size_t i = 0; i < dslb_size; i++) {
        dslb[i].binding = static_cast<uint32_t>(i);  // explicit cast avoids size_t -> uint32_t narrowing
        dslb[i].descriptorCount = 1;
        dslb[i].descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER;
        dslb[i].pImmutableSamplers = NULL;
        // NOTE: VK_SHADER_STAGE_ALL already includes the compute bit; the OR is kept for clarity.
        dslb[i].stageFlags = VK_SHADER_STAGE_COMPUTE_BIT | VK_SHADER_STAGE_ALL;
    }
    if (m_device->props.limits.maxPerStageDescriptorStorageBuffers < dslb_size) {
        printf("%sNeeded storage buffer bindings exceeds this devices limit.  Skipping tests.\n", kSkipPrefix);
        return;
    }

    CreateComputePipelineHelper pipe(*this);
    pipe.InitInfo();
    pipe.dsl_bindings_.resize(dslb_size);
    memcpy(pipe.dsl_bindings_.data(), dslb, dslb_size * sizeof(VkDescriptorSetLayoutBinding));
    // BUG FIX: the compute stage previously used bindStateMinimalShaderText, leaving spv_source
    // unused -- so the group decorations this test is named for were never exercised.  Build the
    // pipeline from the group-decorated SPIR-V instead (same ASM-string VkShaderObj constructor
    // used by ShaderRelaxedBlockLayout / ShaderScalarBlockLayout above).
    pipe.cs_.reset(new VkShaderObj(m_device, spv_source, VK_SHADER_STAGE_COMPUTE_BIT, this));
    pipe.InitState();
    m_errorMonitor->ExpectSuccess();
    pipe.CreateComputePipeline();
    m_errorMonitor->VerifyNotFound();
}
TEST_F(VkPositiveLayerTest, CreatePipelineCheckShaderCapabilityExtension1of2) {
    // This is a positive test, no errors expected
    // Verifies the ability to deal with a shader that declares a non-unique SPIRV capability ID
    TEST_DESCRIPTION("Create a shader in which uses a non-unique capability ID extension, 1 of 2");
    ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor));

    // The ShaderViewportIndexLayerEXT capability comes from VK_EXT_shader_viewport_index_layer.
    if (!DeviceExtensionSupported(gpu(), nullptr, VK_EXT_SHADER_VIEWPORT_INDEX_LAYER_EXTENSION_NAME)) {
        printf("%s Extension %s not supported, skipping this pass. \n", kSkipPrefix,
               VK_EXT_SHADER_VIEWPORT_INDEX_LAYER_EXTENSION_NAME);
        return;
    }
    m_device_extension_names.push_back(VK_EXT_SHADER_VIEWPORT_INDEX_LAYER_EXTENSION_NAME);
    ASSERT_NO_FATAL_FAILURE(InitState());

    // These tests require that the device support multiViewport
    if (!m_device->phy().features().multiViewport) {
        printf("%s Device does not support multiViewport, test skipped.\n", kSkipPrefix);
        return;
    }
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    // Vertex shader writing gl_ViewportIndex via GL_ARB_shader_viewport_layer_array.
    char const *viewport_vs_source =
        "#version 450\n"
        "#extension GL_ARB_shader_viewport_layer_array : enable\n"
        "void main() {\n"
        "    gl_ViewportIndex = 1;\n"
        "}\n";
    VkShaderObj viewport_vs(m_device, viewport_vs_source, VK_SHADER_STAGE_VERTEX_BIT, this);

    CreatePipelineHelper helper(*this);
    helper.InitInfo();
    helper.shader_stages_ = {viewport_vs.GetStageCreateInfo()};
    helper.InitState();
    m_errorMonitor->ExpectSuccess();
    helper.CreateGraphicsPipeline();
    m_errorMonitor->VerifyNotFound();
}
TEST_F(VkPositiveLayerTest, CreatePipelineCheckShaderCapabilityExtension2of2) {
    // This is a positive test, no errors expected
    // Verifies the ability to deal with a shader that declares a non-unique SPIRV capability ID
    TEST_DESCRIPTION("Create a shader in which uses a non-unique capability ID extension, 2 of 2");
    ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor));

    // Same capability as 1of2, but enabled here via VK_NV_viewport_array2 instead.
    if (!DeviceExtensionSupported(gpu(), nullptr, VK_NV_VIEWPORT_ARRAY2_EXTENSION_NAME)) {
        printf("%s Extension %s not supported, skipping this pass. \n", kSkipPrefix, VK_NV_VIEWPORT_ARRAY2_EXTENSION_NAME);
        return;
    }
    m_device_extension_names.push_back(VK_NV_VIEWPORT_ARRAY2_EXTENSION_NAME);
    ASSERT_NO_FATAL_FAILURE(InitState());

    // These tests require that the device support multiViewport
    if (!m_device->phy().features().multiViewport) {
        printf("%s Device does not support multiViewport, test skipped.\n", kSkipPrefix);
        return;
    }
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    // Vertex shader writing gl_ViewportIndex via GL_ARB_shader_viewport_layer_array.
    char const *viewport_vs_source =
        "#version 450\n"
        "#extension GL_ARB_shader_viewport_layer_array : enable\n"
        "void main() {\n"
        "    gl_ViewportIndex = 1;\n"
        "}\n";
    VkShaderObj viewport_vs(m_device, viewport_vs_source, VK_SHADER_STAGE_VERTEX_BIT, this);

    CreatePipelineHelper helper(*this);
    helper.InitInfo();
    helper.shader_stages_ = {viewport_vs.GetStageCreateInfo()};
    helper.InitState();
    m_errorMonitor->ExpectSuccess();
    helper.CreateGraphicsPipeline();
    m_errorMonitor->VerifyNotFound();
}
TEST_F(VkPositiveLayerTest, CreatePipelineFragmentOutputNotWrittenButMasked) {
    TEST_DESCRIPTION(
        "Test that no error is produced when the fragment shader fails to declare an output, but the corresponding attachment's "
        "write mask is 0.");
    m_errorMonitor->ExpectSuccess();
    ASSERT_NO_FATAL_FAILURE(Init());

    // Fragment shader that declares and writes no outputs at all.
    char const *frag_source =
        "#version 450\n"
        "\n"
        "void main(){\n"
        "}\n";

    VkShaderObj vert_shader(m_device, bindStateVertShaderText, VK_SHADER_STAGE_VERTEX_BIT, this);
    VkShaderObj frag_shader(m_device, frag_source, VK_SHADER_STAGE_FRAGMENT_BIT, this);

    VkPipelineObj pipeline(m_device);
    pipeline.AddShader(&vert_shader);
    pipeline.AddShader(&frag_shader);
    /* set up CB 0, not written, but also masked */
    pipeline.AddDefaultColorAttachment(0);
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    VkDescriptorSetObj descriptor_set(m_device);
    descriptor_set.AppendDummy();
    descriptor_set.CreateVKDescriptorSet(m_commandBuffer);

    // Pipeline creation must not complain about the missing FS output.
    pipeline.CreateVKPipeline(descriptor_set.GetPipelineLayout(), renderPass());
    m_errorMonitor->VerifyNotFound();
}
TEST_F(VkPositiveLayerTest, StatelessValidationDisable) {
    TEST_DESCRIPTION("Specify a non-zero value for a reserved parameter with stateless validation disabled");

    // Disable only stateless (API-parameter) validation via VK_EXT_validation_features.
    VkValidationFeatureDisableEXT disables[] = {VK_VALIDATION_FEATURE_DISABLE_API_PARAMETERS_EXT};
    VkValidationFeaturesEXT features = {};
    features.sType = VK_STRUCTURE_TYPE_VALIDATION_FEATURES_EXT;
    features.disabledValidationFeatureCount = 1;
    features.pDisabledValidationFeatures = disables;
    VkCommandPoolCreateFlags pool_flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT;
    ASSERT_NO_FATAL_FAILURE(Init(nullptr, nullptr, pool_flags, &features));

    m_errorMonitor->ExpectSuccess();
    // Specify a non-zero value for a reserved VkFlags parameter. Normally this is expected to trigger a
    // stateless validation error, but this validation was disabled via the features extension, so no
    // errors should be forthcoming.  (Previous comment incorrectly said "Specify 0".)
    VkEvent event_handle = VK_NULL_HANDLE;
    VkEventCreateInfo event_info = {};
    event_info.sType = VK_STRUCTURE_TYPE_EVENT_CREATE_INFO;
    event_info.flags = 1;  // reserved field: spec requires 0 -- deliberately violated here
    vk::CreateEvent(device(), &event_info, NULL, &event_handle);
    vk::DestroyEvent(device(), event_handle, NULL);
    m_errorMonitor->VerifyNotFound();
}
TEST_F(VkPositiveLayerTest, PointSizeWriteInFunction) {
    TEST_DESCRIPTION("Create a pipeline using TOPOLOGY_POINT_LIST and write PointSize in vertex shader function.");
    ASSERT_NO_FATAL_FAILURE(Init());
    m_errorMonitor->ExpectSuccess();
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
    ASSERT_NO_FATAL_FAILURE(InitViewport());

    // VS declares PointSize and writes it from a called function rather than main() directly.
    VkShaderObj point_size_vs(m_device, bindStateVertPointSizeShaderText, VK_SHADER_STAGE_VERTEX_BIT, this);
    VkShaderObj frag_shader(m_device, bindStateFragShaderText, VK_SHADER_STAGE_FRAGMENT_BIT, this);

    {
        CreatePipelineHelper helper(*this);
        helper.InitInfo();
        helper.shader_stages_ = {point_size_vs.GetStageCreateInfo(), frag_shader.GetStageCreateInfo()};
        // Point-list topology makes PointSize a required vertex-stage output.
        helper.ia_ci_.topology = VK_PRIMITIVE_TOPOLOGY_POINT_LIST;
        helper.InitState();
        helper.CreateGraphicsPipeline();
    }
    m_errorMonitor->VerifyNotFound();
}
TEST_F(VkPositiveLayerTest, PointSizeGeomShaderSuccess) {
    TEST_DESCRIPTION(
        "Create a pipeline using TOPOLOGY_POINT_LIST, set PointSize vertex shader, and write in the final geometry stage.");
    ASSERT_NO_FATAL_FAILURE(Init());
    m_errorMonitor->ExpectSuccess();

    // Geometry stage plus PointSize output in tess/geom stages require these two features.
    if ((!m_device->phy().features().geometryShader) || (!m_device->phy().features().shaderTessellationAndGeometryPointSize)) {
        printf("%s Device does not support the required geometry shader features; skipped.\n", kSkipPrefix);
        return;
    }
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
    ASSERT_NO_FATAL_FAILURE(InitViewport());

    // Create VS declaring PointSize and writing to it; the geometry stage is the last
    // pre-rasterization stage, so its PointSize write is the one that matters here.
    VkShaderObj vs(m_device, bindStateVertPointSizeShaderText, VK_SHADER_STAGE_VERTEX_BIT, this);
    VkShaderObj gs(m_device, bindStateGeomPointSizeShaderText, VK_SHADER_STAGE_GEOMETRY_BIT, this);
    VkShaderObj ps(m_device, bindStateFragShaderText, VK_SHADER_STAGE_FRAGMENT_BIT, this);

    CreatePipelineHelper pipe(*this);
    pipe.InitInfo();
    pipe.shader_stages_ = {vs.GetStageCreateInfo(), gs.GetStageCreateInfo(), ps.GetStageCreateInfo()};
    // Set Input Assembly to TOPOLOGY POINT LIST
    pipe.ia_ci_.topology = VK_PRIMITIVE_TOPOLOGY_POINT_LIST;
    pipe.InitState();
    pipe.CreateGraphicsPipeline();
    m_errorMonitor->VerifyNotFound();
}
TEST_F(VkPositiveLayerTest, LoosePointSizeWrite) {
    TEST_DESCRIPTION("Create a pipeline using TOPOLOGY_POINT_LIST and write PointSize outside of a structure.");

    ASSERT_NO_FATAL_FAILURE(Init());
    m_errorMonitor->ExpectSuccess();
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
    ASSERT_NO_FATAL_FAILURE(InitViewport());

    // Hand-written SPIR-V vertex shader: Position and PointSize are declared as two
    // loose Output variables (not members of a gl_PerVertex block). PointSize is
    // stored directly (OpStore %glpointsize %float_1), which the SPIR-V point-size
    // validation must still recognize as a valid PointSize write.
    const std::string LoosePointSizeWrite = R"(
OpCapability Shader
%1 = OpExtInstImport "GLSL.std.450"
OpMemoryModel Logical GLSL450
OpEntryPoint Vertex %main "main" %glposition %glpointsize %gl_VertexIndex
OpSource GLSL 450
OpName %main "main"
OpName %vertices "vertices"
OpName %glposition "glposition"
OpName %glpointsize "glpointsize"
OpName %gl_VertexIndex "gl_VertexIndex"
OpDecorate %glposition BuiltIn Position
OpDecorate %glpointsize BuiltIn PointSize
OpDecorate %gl_VertexIndex BuiltIn VertexIndex
%void = OpTypeVoid
%3 = OpTypeFunction %void
%float = OpTypeFloat 32
%v2float = OpTypeVector %float 2
%uint = OpTypeInt 32 0
%uint_3 = OpConstant %uint 3
%_arr_v2float_uint_3 = OpTypeArray %v2float %uint_3
%_ptr_Private__arr_v2float_uint_3 = OpTypePointer Private %_arr_v2float_uint_3
%vertices = OpVariable %_ptr_Private__arr_v2float_uint_3 Private
%int = OpTypeInt 32 1
%int_0 = OpConstant %int 0
%float_n1 = OpConstant %float -1
%16 = OpConstantComposite %v2float %float_n1 %float_n1
%_ptr_Private_v2float = OpTypePointer Private %v2float
%int_1 = OpConstant %int 1
%float_1 = OpConstant %float 1
%21 = OpConstantComposite %v2float %float_1 %float_n1
%int_2 = OpConstant %int 2
%float_0 = OpConstant %float 0
%25 = OpConstantComposite %v2float %float_0 %float_1
%v4float = OpTypeVector %float 4
%_ptr_Output_gl_Position = OpTypePointer Output %v4float
%glposition = OpVariable %_ptr_Output_gl_Position Output
%_ptr_Output_gl_PointSize = OpTypePointer Output %float
%glpointsize = OpVariable %_ptr_Output_gl_PointSize Output
%_ptr_Input_int = OpTypePointer Input %int
%gl_VertexIndex = OpVariable %_ptr_Input_int Input
%int_3 = OpConstant %int 3
%_ptr_Output_v4float = OpTypePointer Output %v4float
%_ptr_Output_float = OpTypePointer Output %float
%main = OpFunction %void None %3
%5 = OpLabel
%18 = OpAccessChain %_ptr_Private_v2float %vertices %int_0
OpStore %18 %16
%22 = OpAccessChain %_ptr_Private_v2float %vertices %int_1
OpStore %22 %21
%26 = OpAccessChain %_ptr_Private_v2float %vertices %int_2
OpStore %26 %25
%33 = OpLoad %int %gl_VertexIndex
%35 = OpSMod %int %33 %int_3
%36 = OpAccessChain %_ptr_Private_v2float %vertices %35
%37 = OpLoad %v2float %36
%38 = OpCompositeExtract %float %37 0
%39 = OpCompositeExtract %float %37 1
%40 = OpCompositeConstruct %v4float %38 %39 %float_0 %float_1
%42 = OpAccessChain %_ptr_Output_v4float %glposition
OpStore %42 %40
OpStore %glpointsize %float_1
OpReturn
OpFunctionEnd
)";

    // Create VS declaring PointSize and write to it in a function call.
    VkShaderObj vs(m_device, LoosePointSizeWrite, VK_SHADER_STAGE_VERTEX_BIT, this);
    VkShaderObj ps(m_device, bindStateFragShaderText, VK_SHADER_STAGE_FRAGMENT_BIT, this);

    {
        // Point-list pipeline using the loose-PointSize shader; no errors expected.
        CreatePipelineHelper pipe(*this);
        pipe.InitInfo();
        pipe.shader_stages_ = {vs.GetStageCreateInfo(), ps.GetStageCreateInfo()};
        // Set Input Assembly to TOPOLOGY POINT LIST
        pipe.ia_ci_.topology = VK_PRIMITIVE_TOPOLOGY_POINT_LIST;
        pipe.InitState();
        pipe.CreateGraphicsPipeline();
    }
    m_errorMonitor->VerifyNotFound();
}
TEST_F(VkPositiveLayerTest, UncompressedToCompressedImageCopy) {
    TEST_DESCRIPTION("Image copies between compressed and uncompressed images");
    ASSERT_NO_FATAL_FAILURE(Init());

    // Verify format support
    // Size-compatible (64-bit) formats. Uncompressed is 64 bits per texel, compressed is 64 bits per 4x4 block (or 4bpt).
    if (!ImageFormatAndFeaturesSupported(gpu(), VK_FORMAT_R16G16B16A16_UINT, VK_IMAGE_TILING_OPTIMAL,
                                         VK_FORMAT_FEATURE_TRANSFER_SRC_BIT_KHR | VK_FORMAT_FEATURE_TRANSFER_DST_BIT_KHR) ||
        !ImageFormatAndFeaturesSupported(gpu(), VK_FORMAT_BC1_RGBA_SRGB_BLOCK, VK_IMAGE_TILING_OPTIMAL,
                                         VK_FORMAT_FEATURE_TRANSFER_SRC_BIT_KHR | VK_FORMAT_FEATURE_TRANSFER_DST_BIT_KHR)) {
        printf("%s Required formats/features not supported - UncompressedToCompressedImageCopy skipped.\n", kSkipPrefix);
        return;
    }

    VkImageObj uncomp_10x10t_image(m_device);       // Size = 10 * 10 * 64 = 6400
    VkImageObj comp_10x10b_40x40t_image(m_device);  // Size = 40 * 40 * 4 = 6400
    uncomp_10x10t_image.Init(10, 10, 1, VK_FORMAT_R16G16B16A16_UINT,
                             VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT, VK_IMAGE_TILING_OPTIMAL);
    comp_10x10b_40x40t_image.Init(40, 40, 1, VK_FORMAT_BC1_RGBA_SRGB_BLOCK,
                                  VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT, VK_IMAGE_TILING_OPTIMAL);
    if (!uncomp_10x10t_image.initialized() || !comp_10x10b_40x40t_image.initialized()) {
        printf("%s Unable to initialize surfaces - UncompressedToCompressedImageCopy skipped.\n", kSkipPrefix);
        return;
    }

    // Both copies represent the same number of bytes. Bytes Per Texel = 1 for bc6, 16 for uncompressed
    // Copy region covers the full mip 0 of a single layer in each direction.
    VkImageCopy copy_region = {};
    copy_region.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
    copy_region.dstSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
    copy_region.srcSubresource.mipLevel = 0;
    copy_region.dstSubresource.mipLevel = 0;
    copy_region.srcSubresource.baseArrayLayer = 0;
    copy_region.dstSubresource.baseArrayLayer = 0;
    copy_region.srcSubresource.layerCount = 1;
    copy_region.dstSubresource.layerCount = 1;
    copy_region.srcOffset = {0, 0, 0};
    copy_region.dstOffset = {0, 0, 0};

    m_errorMonitor->ExpectSuccess();
    m_commandBuffer->begin();

    // Copy from uncompressed to compressed
    copy_region.extent = {10, 10, 1};  // Dimensions in (uncompressed) texels
    vk::CmdCopyImage(m_commandBuffer->handle(), uncomp_10x10t_image.handle(), VK_IMAGE_LAYOUT_GENERAL,
                     comp_10x10b_40x40t_image.handle(), VK_IMAGE_LAYOUT_GENERAL, 1, &copy_region);
    // And from compressed to uncompressed
    copy_region.extent = {40, 40, 1};  // Dimensions in (compressed) texels
    vk::CmdCopyImage(m_commandBuffer->handle(), comp_10x10b_40x40t_image.handle(), VK_IMAGE_LAYOUT_GENERAL,
                     uncomp_10x10t_image.handle(), VK_IMAGE_LAYOUT_GENERAL, 1, &copy_region);

    // End the command buffer before verifying: previously VerifyNotFound() preceded
    // end(), so any validation message emitted by vkEndCommandBuffer escaped the check.
    m_commandBuffer->end();
    m_errorMonitor->VerifyNotFound();
}
TEST_F(VkPositiveLayerTest, DeleteDescriptorSetLayoutsBeforeDescriptorSets) {
    TEST_DESCRIPTION("Create DSLayouts and DescriptorSets and then delete the DSLayouts before the DescriptorSets.");
    ASSERT_NO_FATAL_FAILURE(Init());
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
    VkResult err;
    m_errorMonitor->ExpectSuccess();

    // Pool created with FREE_DESCRIPTOR_SET_BIT so the individual set can be freed below.
    VkDescriptorPoolSize ds_type_count = {};
    ds_type_count.type = VK_DESCRIPTOR_TYPE_SAMPLER;
    ds_type_count.descriptorCount = 1;

    VkDescriptorPoolCreateInfo ds_pool_ci = {};
    ds_pool_ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO;
    ds_pool_ci.pNext = NULL;
    ds_pool_ci.flags = VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT;
    ds_pool_ci.maxSets = 1;
    ds_pool_ci.poolSizeCount = 1;
    ds_pool_ci.pPoolSizes = &ds_type_count;

    VkDescriptorPool ds_pool_one;
    err = vk::CreateDescriptorPool(m_device->device(), &ds_pool_ci, NULL, &ds_pool_one);
    ASSERT_VK_SUCCESS(err);

    VkDescriptorSetLayoutBinding dsl_binding = {};
    dsl_binding.binding = 0;
    dsl_binding.descriptorType = VK_DESCRIPTOR_TYPE_SAMPLER;
    dsl_binding.descriptorCount = 1;
    dsl_binding.stageFlags = VK_SHADER_STAGE_ALL;
    dsl_binding.pImmutableSamplers = NULL;

    VkDescriptorSet descriptorSet;
    {
        // Layout lives only in this scope; freeing/using the set afterwards must be legal.
        const VkDescriptorSetLayoutObj ds_layout(m_device, {dsl_binding});

        VkDescriptorSetAllocateInfo alloc_info = {};
        alloc_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO;
        alloc_info.descriptorSetCount = 1;
        alloc_info.descriptorPool = ds_pool_one;
        alloc_info.pSetLayouts = &ds_layout.handle();
        err = vk::AllocateDescriptorSets(m_device->device(), &alloc_info, &descriptorSet);
        ASSERT_VK_SUCCESS(err);
    }  // ds_layout destroyed

    err = vk::FreeDescriptorSets(m_device->device(), ds_pool_one, 1, &descriptorSet);
    ASSERT_VK_SUCCESS(err);  // fix: result was assigned but never checked
    vk::DestroyDescriptorPool(m_device->device(), ds_pool_one, NULL);
    m_errorMonitor->VerifyNotFound();
}
TEST_F(VkPositiveLayerTest, CommandPoolDeleteWithReferences) {
    TEST_DESCRIPTION("Ensure the validation layers bookkeeping tracks the implicit command buffer frees.");
    ASSERT_NO_FATAL_FAILURE(Init());

    VkCommandPoolCreateInfo cmd_pool_info = {};
    cmd_pool_info.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO;
    cmd_pool_info.pNext = NULL;
    cmd_pool_info.queueFamilyIndex = m_device->graphics_queue_node_index_;
    // No pool flags needed (fix: flags was assigned RESET_COMMAND_BUFFER_BIT and then
    // immediately overwritten with 0 — the dead first assignment has been removed).
    cmd_pool_info.flags = 0;

    VkCommandPool secondary_cmd_pool;
    VkResult res = vk::CreateCommandPool(m_device->handle(), &cmd_pool_info, NULL, &secondary_cmd_pool);
    ASSERT_VK_SUCCESS(res);

    // Allocate a secondary command buffer from the soon-to-be-destroyed pool.
    VkCommandBufferAllocateInfo cmdalloc = vk_testing::CommandBuffer::create_info(secondary_cmd_pool);
    cmdalloc.level = VK_COMMAND_BUFFER_LEVEL_SECONDARY;
    VkCommandBuffer secondary_cmds;
    res = vk::AllocateCommandBuffers(m_device->handle(), &cmdalloc, &secondary_cmds);
    ASSERT_VK_SUCCESS(res);  // fix: allocation result was previously unchecked

    VkCommandBufferInheritanceInfo cmd_buf_inheritance_info = {};
    cmd_buf_inheritance_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO;
    cmd_buf_inheritance_info.pNext = NULL;
    cmd_buf_inheritance_info.renderPass = VK_NULL_HANDLE;
    cmd_buf_inheritance_info.subpass = 0;
    cmd_buf_inheritance_info.framebuffer = VK_NULL_HANDLE;
    cmd_buf_inheritance_info.occlusionQueryEnable = VK_FALSE;
    cmd_buf_inheritance_info.queryFlags = 0;
    cmd_buf_inheritance_info.pipelineStatistics = 0;

    VkCommandBufferBeginInfo secondary_begin = {};
    secondary_begin.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
    secondary_begin.pNext = NULL;
    secondary_begin.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT;
    secondary_begin.pInheritanceInfo = &cmd_buf_inheritance_info;

    res = vk::BeginCommandBuffer(secondary_cmds, &secondary_begin);
    ASSERT_VK_SUCCESS(res);
    vk::EndCommandBuffer(secondary_cmds);

    // Record the secondary into the (fixture-owned) primary so the layers hold a reference.
    m_commandBuffer->begin();
    vk::CmdExecuteCommands(m_commandBuffer->handle(), 1, &secondary_cmds);
    m_commandBuffer->end();

    // DestroyCommandPool *implicitly* frees the command buffers allocated from it
    vk::DestroyCommandPool(m_device->handle(), secondary_cmd_pool, NULL);
    // If bookkeeping has been lax, validating the reset will attempt to touch deleted data
    res = vk::ResetCommandPool(m_device->handle(), m_commandPool->handle(), 0);
    ASSERT_VK_SUCCESS(res);
}
TEST_F(VkPositiveLayerTest, SecondaryCommandBufferClearColorAttachments) {
    TEST_DESCRIPTION("Create a secondary command buffer and record a CmdClearAttachments call into it");
    m_errorMonitor->ExpectSuccess();
    ASSERT_NO_FATAL_FAILURE(Init());
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    // Allocate one secondary-level command buffer from the fixture's pool.
    VkCommandBufferAllocateInfo alloc_info = {};
    alloc_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
    alloc_info.commandPool = m_commandPool->handle();
    alloc_info.level = VK_COMMAND_BUFFER_LEVEL_SECONDARY;
    alloc_info.commandBufferCount = 1;
    VkCommandBuffer secondary_cb;
    ASSERT_VK_SUCCESS(vk::AllocateCommandBuffers(m_device->device(), &alloc_info, &secondary_cb));

    // Inherit the default render pass/framebuffer so CmdClearAttachments is valid
    // inside the secondary with RENDER_PASS_CONTINUE.
    VkCommandBufferInheritanceInfo inheritance = {};
    inheritance.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO;
    inheritance.renderPass = m_renderPass;
    inheritance.framebuffer = m_framebuffer;

    VkCommandBufferBeginInfo begin_info = {};
    begin_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
    begin_info.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT | VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT;
    begin_info.pInheritanceInfo = &inheritance;

    vk::BeginCommandBuffer(secondary_cb, &begin_info);
    VkClearAttachment clear_attachment;
    clear_attachment.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
    clear_attachment.colorAttachment = 0;
    for (int channel = 0; channel < 4; ++channel) {
        clear_attachment.clearValue.color.float32[channel] = 0;
    }
    VkClearRect clear_rect = {{{0, 0}, {32, 32}}, 0, 1};
    vk::CmdClearAttachments(secondary_cb, 1, &clear_attachment, 1, &clear_rect);
    vk::EndCommandBuffer(secondary_cb);

    // Execute the secondary inside a render pass instance recorded in the primary.
    m_commandBuffer->begin();
    vk::CmdBeginRenderPass(m_commandBuffer->handle(), &m_renderPassBeginInfo, VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS);
    vk::CmdExecuteCommands(m_commandBuffer->handle(), 1, &secondary_cb);
    vk::CmdEndRenderPass(m_commandBuffer->handle());
    m_commandBuffer->end();
    m_errorMonitor->VerifyNotFound();
}
TEST_F(VkPositiveLayerTest, SecondaryCommandBufferImageLayoutTransitions) {
    TEST_DESCRIPTION("Perform an image layout transition in a secondary command buffer followed by a transition in the primary.");
    VkResult err;
    m_errorMonitor->ExpectSuccess();
    ASSERT_NO_FATAL_FAILURE(Init());
    auto depth_format = FindSupportedDepthStencilFormat(gpu());
    if (!depth_format) {
        printf("%s Couldn't find depth stencil format.\n", kSkipPrefix);
        return;
    }
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    // Allocate a secondary and primary cmd buffer
    VkCommandBufferAllocateInfo command_buffer_allocate_info = {};
    command_buffer_allocate_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
    command_buffer_allocate_info.commandPool = m_commandPool->handle();
    command_buffer_allocate_info.level = VK_COMMAND_BUFFER_LEVEL_SECONDARY;
    command_buffer_allocate_info.commandBufferCount = 1;
    VkCommandBuffer secondary_command_buffer;
    ASSERT_VK_SUCCESS(vk::AllocateCommandBuffers(m_device->device(), &command_buffer_allocate_info, &secondary_command_buffer));
    command_buffer_allocate_info.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
    VkCommandBuffer primary_command_buffer;
    ASSERT_VK_SUCCESS(vk::AllocateCommandBuffers(m_device->device(), &command_buffer_allocate_info, &primary_command_buffer));

    VkCommandBufferBeginInfo command_buffer_begin_info = {};
    VkCommandBufferInheritanceInfo command_buffer_inheritance_info = {};
    command_buffer_inheritance_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO;
    command_buffer_begin_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
    command_buffer_begin_info.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT;
    command_buffer_begin_info.pInheritanceInfo = &command_buffer_inheritance_info;
    err = vk::BeginCommandBuffer(secondary_command_buffer, &command_buffer_begin_info);
    ASSERT_VK_SUCCESS(err);

    // Depth/stencil image whose layout will be transitioned twice below.
    VkImageObj image(m_device);
    image.Init(128, 128, 1, depth_format, VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL, 0);
    ASSERT_TRUE(image.initialized());

    // First transition (recorded in the SECONDARY): UNDEFINED -> DEPTH_STENCIL_ATTACHMENT_OPTIMAL.
    VkImageMemoryBarrier img_barrier = {};
    img_barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
    img_barrier.srcAccessMask = VK_ACCESS_HOST_WRITE_BIT;
    img_barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT;
    img_barrier.oldLayout = VK_IMAGE_LAYOUT_UNDEFINED;
    img_barrier.newLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;
    img_barrier.image = image.handle();
    img_barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
    img_barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
    img_barrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT;
    img_barrier.subresourceRange.baseArrayLayer = 0;
    img_barrier.subresourceRange.baseMipLevel = 0;
    img_barrier.subresourceRange.layerCount = 1;
    img_barrier.subresourceRange.levelCount = 1;
    vk::CmdPipelineBarrier(secondary_command_buffer, VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, 0, 0, nullptr,
                           0, nullptr, 1, &img_barrier);
    err = vk::EndCommandBuffer(secondary_command_buffer);
    ASSERT_VK_SUCCESS(err);

    // Now update primary cmd buffer to execute secondary and transitions image
    // Second transition's oldLayout must match the layout produced by the secondary,
    // exercising the layers' cross-command-buffer layout tracking.
    command_buffer_begin_info.pInheritanceInfo = nullptr;
    err = vk::BeginCommandBuffer(primary_command_buffer, &command_buffer_begin_info);
    ASSERT_VK_SUCCESS(err);
    vk::CmdExecuteCommands(primary_command_buffer, 1, &secondary_command_buffer);
    VkImageMemoryBarrier img_barrier2 = {};
    img_barrier2.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
    img_barrier2.srcAccessMask = VK_ACCESS_HOST_WRITE_BIT;
    img_barrier2.dstAccessMask = VK_ACCESS_SHADER_READ_BIT;
    img_barrier2.oldLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;
    img_barrier2.newLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;
    img_barrier2.image = image.handle();
    img_barrier2.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
    img_barrier2.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
    img_barrier2.subresourceRange.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT;
    img_barrier2.subresourceRange.baseArrayLayer = 0;
    img_barrier2.subresourceRange.baseMipLevel = 0;
    img_barrier2.subresourceRange.layerCount = 1;
    img_barrier2.subresourceRange.levelCount = 1;
    vk::CmdPipelineBarrier(primary_command_buffer, VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, 0, 0, nullptr,
                           0, nullptr, 1, &img_barrier2);
    err = vk::EndCommandBuffer(primary_command_buffer);
    ASSERT_VK_SUCCESS(err);

    // Submit so queue-time layout validation also runs; no errors are expected.
    VkSubmitInfo submit_info = {};
    submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
    submit_info.commandBufferCount = 1;
    submit_info.pCommandBuffers = &primary_command_buffer;
    err = vk::QueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE);
    ASSERT_VK_SUCCESS(err);
    m_errorMonitor->VerifyNotFound();

    // Wait for completion before freeing the command buffers still in flight.
    err = vk::DeviceWaitIdle(m_device->device());
    ASSERT_VK_SUCCESS(err);
    vk::FreeCommandBuffers(m_device->device(), m_commandPool->handle(), 1, &secondary_command_buffer);
    vk::FreeCommandBuffers(m_device->device(), m_commandPool->handle(), 1, &primary_command_buffer);
}
// This is a positive test. No failures are expected.
TEST_F(VkPositiveLayerTest, IgnoreUnrelatedDescriptor) {
    TEST_DESCRIPTION(
        "Ensure that the vkUpdateDescriptorSets validation code is ignoring VkWriteDescriptorSet members that are not related to "
        "the descriptor type specified by VkWriteDescriptorSet::descriptorType. Correct validation behavior will result in the "
        "test running to completion without validation errors.");

    // Poison value: if the layers dereference an "unrelated" pointer member, reading
    // through this address will most likely crash the test rather than pass silently.
    const uintptr_t invalid_ptr = 0xcdcdcdcd;

    ASSERT_NO_FATAL_FAILURE(Init());

    // Verify VK_FORMAT_R8_UNORM supports VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT
    const VkFormat format_texel_case = VK_FORMAT_R8_UNORM;
    const char *format_texel_case_string = "VK_FORMAT_R8_UNORM";
    VkFormatProperties format_properties;
    vk::GetPhysicalDeviceFormatProperties(gpu(), format_texel_case, &format_properties);
    if (!(format_properties.bufferFeatures & VK_FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_BIT)) {
        printf("%s Test requires %s to support VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT\n", kSkipPrefix, format_texel_case_string);
        return;
    }

    // Image Case: pImageInfo is the relevant member; pBufferInfo/pTexelBufferView are poisoned.
    {
        m_errorMonitor->ExpectSuccess();

        VkImageObj image(m_device);
        image.Init(32, 32, 1, VK_FORMAT_B8G8R8A8_UNORM, VK_IMAGE_USAGE_SAMPLED_BIT, VK_IMAGE_TILING_OPTIMAL, 0);
        VkImageView view = image.targetView(VK_FORMAT_B8G8R8A8_UNORM);

        OneOffDescriptorSet descriptor_set(m_device, {
                                                         {0, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, 1, VK_SHADER_STAGE_ALL, nullptr},
                                                     });

        VkDescriptorImageInfo image_info = {};
        image_info.imageView = view;
        image_info.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;

        VkWriteDescriptorSet descriptor_write;
        memset(&descriptor_write, 0, sizeof(descriptor_write));
        descriptor_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
        descriptor_write.dstSet = descriptor_set.set_;
        descriptor_write.dstBinding = 0;
        descriptor_write.descriptorCount = 1;
        descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE;
        descriptor_write.pImageInfo = &image_info;

        // Set pBufferInfo and pTexelBufferView to invalid values, which should
        // be ignored for descriptorType == VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE.
        // This will most likely produce a crash if the parameter_validation
        // layer does not correctly ignore pBufferInfo.
        descriptor_write.pBufferInfo = reinterpret_cast<const VkDescriptorBufferInfo *>(invalid_ptr);
        descriptor_write.pTexelBufferView = reinterpret_cast<const VkBufferView *>(invalid_ptr);

        vk::UpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL);
        m_errorMonitor->VerifyNotFound();
    }

    // Buffer Case: pBufferInfo is the relevant member; pImageInfo/pTexelBufferView are poisoned.
    {
        m_errorMonitor->ExpectSuccess();

        uint32_t queue_family_index = 0;
        VkBufferCreateInfo buffer_create_info = {};
        buffer_create_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
        buffer_create_info.size = 1024;
        buffer_create_info.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;
        buffer_create_info.queueFamilyIndexCount = 1;
        buffer_create_info.pQueueFamilyIndices = &queue_family_index;

        VkBufferObj buffer;
        buffer.init(*m_device, buffer_create_info);

        OneOffDescriptorSet descriptor_set(m_device, {
                                                         {0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1, VK_SHADER_STAGE_ALL, nullptr},
                                                     });

        VkDescriptorBufferInfo buffer_info = {};
        buffer_info.buffer = buffer.handle();
        buffer_info.offset = 0;
        buffer_info.range = 1024;

        VkWriteDescriptorSet descriptor_write;
        memset(&descriptor_write, 0, sizeof(descriptor_write));
        descriptor_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
        descriptor_write.dstSet = descriptor_set.set_;
        descriptor_write.dstBinding = 0;
        descriptor_write.descriptorCount = 1;
        descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
        descriptor_write.pBufferInfo = &buffer_info;

        // Set pImageInfo and pTexelBufferView to invalid values, which should
        // be ignored for descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER.
        // This will most likely produce a crash if the parameter_validation
        // layer does not correctly ignore pImageInfo.
        descriptor_write.pImageInfo = reinterpret_cast<const VkDescriptorImageInfo *>(invalid_ptr);
        descriptor_write.pTexelBufferView = reinterpret_cast<const VkBufferView *>(invalid_ptr);

        vk::UpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL);
        m_errorMonitor->VerifyNotFound();
    }

    // Texel Buffer Case: pTexelBufferView is the relevant member; pImageInfo/pBufferInfo are poisoned.
    {
        m_errorMonitor->ExpectSuccess();

        uint32_t queue_family_index = 0;
        VkBufferCreateInfo buffer_create_info = {};
        buffer_create_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
        buffer_create_info.size = 1024;
        buffer_create_info.usage = VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT;
        buffer_create_info.queueFamilyIndexCount = 1;
        buffer_create_info.pQueueFamilyIndices = &queue_family_index;

        VkBufferObj buffer;
        buffer.init(*m_device, buffer_create_info);

        VkBufferViewCreateInfo buff_view_ci = {};
        buff_view_ci.sType = VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO;
        buff_view_ci.buffer = buffer.handle();
        buff_view_ci.format = format_texel_case;
        buff_view_ci.range = VK_WHOLE_SIZE;
        VkBufferView buffer_view;
        VkResult err = vk::CreateBufferView(m_device->device(), &buff_view_ci, NULL, &buffer_view);
        ASSERT_VK_SUCCESS(err);

        OneOffDescriptorSet descriptor_set(m_device,
                                           {
                                               {0, VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER, 1, VK_SHADER_STAGE_ALL, nullptr},
                                           });

        VkWriteDescriptorSet descriptor_write;
        memset(&descriptor_write, 0, sizeof(descriptor_write));
        descriptor_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
        descriptor_write.dstSet = descriptor_set.set_;
        descriptor_write.dstBinding = 0;
        descriptor_write.descriptorCount = 1;
        descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER;
        descriptor_write.pTexelBufferView = &buffer_view;

        // Set pImageInfo and pBufferInfo to invalid values, which should be
        // ignored for descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER.
        // This will most likely produce a crash if the parameter_validation
        // layer does not correctly ignore pImageInfo and pBufferInfo.
        descriptor_write.pImageInfo = reinterpret_cast<const VkDescriptorImageInfo *>(invalid_ptr);
        descriptor_write.pBufferInfo = reinterpret_cast<const VkDescriptorBufferInfo *>(invalid_ptr);

        vk::UpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL);
        m_errorMonitor->VerifyNotFound();

        vk::DestroyBufferView(m_device->device(), buffer_view, NULL);
    }
}
TEST_F(VkPositiveLayerTest, ImmutableSamplerOnlyDescriptor) {
    TEST_DESCRIPTION("Bind a DescriptorSet with only an immutable sampler and make sure that we don't warn for no update.");
    ASSERT_NO_FATAL_FAILURE(Init());
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    // Single SAMPLER binding. NOTE(review): pImmutableSamplers is nullptr here, so the
    // layout does not actually embed the sampler created below as an immutable sampler,
    // despite the test name — confirm whether the binding was meant to reference &sampler.
    OneOffDescriptorSet descriptor_set(m_device, {
                                                     {0, VK_DESCRIPTOR_TYPE_SAMPLER, 1, VK_SHADER_STAGE_FRAGMENT_BIT, nullptr},
                                                 });

    // Create a sampler; it is destroyed at the end but never written into the set.
    VkSamplerCreateInfo sampler_ci = SafeSaneSamplerCreateInfo();
    VkSampler sampler;
    VkResult err = vk::CreateSampler(m_device->device(), &sampler_ci, NULL, &sampler);
    ASSERT_VK_SUCCESS(err);

    const VkPipelineLayoutObj pipeline_layout(m_device, {&descriptor_set.layout_});

    // Binding the never-updated set must not produce a "descriptor not updated" warning.
    m_errorMonitor->ExpectSuccess();
    m_commandBuffer->begin();
    m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo);
    vk::CmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout.handle(), 0, 1,
                              &descriptor_set.set_, 0, nullptr);
    m_errorMonitor->VerifyNotFound();

    vk::DestroySampler(m_device->device(), sampler, NULL);
    m_commandBuffer->EndRenderPass();
    m_commandBuffer->end();
}
// This is a positive test. No failures are expected.
TEST_F(VkPositiveLayerTest, EmptyDescriptorUpdateTest) {
    TEST_DESCRIPTION("Update last descriptor in a set that includes an empty binding");
    VkResult err;
    ASSERT_NO_FATAL_FAILURE(Init());
    m_errorMonitor->ExpectSuccess();

    // Create layout with two uniform buffer descriptors w/ empty binding between them
    // (binding 1 has descriptorCount == 0, so it consumes no pool descriptors).
    OneOffDescriptorSet ds(m_device, {
                                         {0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1, VK_SHADER_STAGE_ALL, nullptr},
                                         {1, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 0 /*!*/, 0, nullptr},
                                         {2, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1, VK_SHADER_STAGE_ALL, nullptr},
                                     });

    // Create a buffer to be used for update
    VkBufferCreateInfo buff_ci = {};
    buff_ci.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
    buff_ci.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;
    buff_ci.size = 256;
    buff_ci.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
    VkBuffer buffer;
    err = vk::CreateBuffer(m_device->device(), &buff_ci, NULL, &buffer);
    ASSERT_VK_SUCCESS(err);

    // Have to bind memory to buffer before descriptor update
    VkMemoryAllocateInfo mem_alloc = {};
    mem_alloc.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
    mem_alloc.pNext = NULL;
    mem_alloc.allocationSize = 512;  // initial guess; grown below if mem_reqs.size is larger
    mem_alloc.memoryTypeIndex = 0;

    VkMemoryRequirements mem_reqs;
    vk::GetBufferMemoryRequirements(m_device->device(), buffer, &mem_reqs);
    bool pass = m_device->phy().set_memory_type(mem_reqs.memoryTypeBits, &mem_alloc, 0);
    if (!pass) {
        // Skip path must still release the buffer created above.
        printf("%s Failed to allocate memory.\n", kSkipPrefix);
        vk::DestroyBuffer(m_device->device(), buffer, NULL);
        return;
    }
    // Make sure allocation is sufficiently large to accommodate buffer requirements
    if (mem_reqs.size > mem_alloc.allocationSize) {
        mem_alloc.allocationSize = mem_reqs.size;
    }

    VkDeviceMemory mem;
    err = vk::AllocateMemory(m_device->device(), &mem_alloc, NULL, &mem);
    ASSERT_VK_SUCCESS(err);
    err = vk::BindBufferMemory(m_device->device(), buffer, mem, 0);
    ASSERT_VK_SUCCESS(err);

    // Only update the descriptor at binding 2
    VkDescriptorBufferInfo buff_info = {};
    buff_info.buffer = buffer;
    buff_info.offset = 0;
    buff_info.range = VK_WHOLE_SIZE;
    VkWriteDescriptorSet descriptor_write = {};
    descriptor_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
    descriptor_write.dstBinding = 2;
    descriptor_write.descriptorCount = 1;
    descriptor_write.pTexelBufferView = nullptr;
    descriptor_write.pBufferInfo = &buff_info;
    descriptor_write.pImageInfo = nullptr;
    descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
    descriptor_write.dstSet = ds.set_;

    vk::UpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL);
    m_errorMonitor->VerifyNotFound();

    // Cleanup
    vk::FreeMemory(m_device->device(), mem, NULL);
    vk::DestroyBuffer(m_device->device(), buffer, NULL);
}
// This is a positive test. No failures are expected.
TEST_F(VkPositiveLayerTest, PushDescriptorNullDstSetTest) {
    TEST_DESCRIPTION("Use null dstSet in CmdPushDescriptorSetKHR");

    if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) {
        m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
    } else {
        printf("%s Did not find VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME; skipped.\n", kSkipPrefix);
        return;
    }
    ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor));
    if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_PUSH_DESCRIPTOR_EXTENSION_NAME)) {
        m_device_extension_names.push_back(VK_KHR_PUSH_DESCRIPTOR_EXTENSION_NAME);
    } else {
        printf("%s Push Descriptors Extension not supported, skipping tests\n", kSkipPrefix);
        return;
    }
    ASSERT_NO_FATAL_FAILURE(InitState());

    auto push_descriptor_prop = GetPushDescriptorProperties(instance(), gpu());
    if (push_descriptor_prop.maxPushDescriptors < 1) {
        // Some implementations report an invalid maxPushDescriptors of 0
        printf("%s maxPushDescriptors is zero, skipping tests\n", kSkipPrefix);
        return;
    }

    // Arm the monitor after all skip checks, so an early return cannot leave it
    // in the ExpectSuccess state without a matching VerifyNotFound().
    m_errorMonitor->ExpectSuccess();
    ASSERT_NO_FATAL_FAILURE(InitViewport());
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    VkDescriptorSetLayoutBinding dsl_binding = {};
    dsl_binding.binding = 2;
    dsl_binding.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
    dsl_binding.descriptorCount = 1;
    dsl_binding.stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT;
    dsl_binding.pImmutableSamplers = NULL;

    const VkDescriptorSetLayoutObj ds_layout(m_device, {dsl_binding});
    // Create push descriptor set layout
    const VkDescriptorSetLayoutObj push_ds_layout(m_device, {dsl_binding}, VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR);

    // Use helper to create graphics pipeline
    CreatePipelineHelper helper(*this);
    helper.InitInfo();
    helper.InitState();
    helper.pipeline_layout_ = VkPipelineLayoutObj(m_device, {&push_ds_layout, &ds_layout});
    helper.CreateGraphicsPipeline();

    const float vbo_data[3] = {1.f, 0.f, 1.f};
    VkConstantBufferObj vbo(m_device, sizeof(vbo_data), (const void *)&vbo_data, VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT);
    VkDescriptorBufferInfo buff_info;
    buff_info.buffer = vbo.handle();
    buff_info.offset = 0;
    buff_info.range = sizeof(vbo_data);
    VkWriteDescriptorSet descriptor_write = {};
    descriptor_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
    descriptor_write.dstBinding = 2;
    descriptor_write.descriptorCount = 1;
    descriptor_write.pTexelBufferView = nullptr;
    descriptor_write.pBufferInfo = &buff_info;
    descriptor_write.pImageInfo = nullptr;
    descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
    descriptor_write.dstSet = 0;  // Should not cause a validation error

    // Find address of extension call and make the call
    PFN_vkCmdPushDescriptorSetKHR vkCmdPushDescriptorSetKHR =
        (PFN_vkCmdPushDescriptorSetKHR)vk::GetDeviceProcAddr(m_device->device(), "vkCmdPushDescriptorSetKHR");
    assert(vkCmdPushDescriptorSetKHR != nullptr);
    m_commandBuffer->begin();

    // In Intel GPU, it needs to bind pipeline before push descriptor set.
    vk::CmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, helper.pipeline_);
    vkCmdPushDescriptorSetKHR(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, helper.pipeline_layout_.handle(), 0, 1,
                              &descriptor_write);
    // Fix: previously the command buffer was left in the recording state; end it so
    // teardown does not see an open command buffer.
    m_commandBuffer->end();
    m_errorMonitor->VerifyNotFound();
}
// This is a positive test. No failures are expected.
TEST_F(VkPositiveLayerTest, PushDescriptorUnboundSetTest) {
TEST_DESCRIPTION("Ensure that no validation errors are produced for not bound push descriptor sets");
if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) {
m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
} else {
printf("%s Did not find VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME; skipped.\n", kSkipPrefix);
return;
}
ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor));
if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_PUSH_DESCRIPTOR_EXTENSION_NAME)) {
m_device_extension_names.push_back(