/* Copyright (c) 2015-2019 The Khronos Group Inc.
* Copyright (c) 2015-2019 Valve Corporation
* Copyright (c) 2015-2019 LunarG, Inc.
* Copyright (C) 2015-2019 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Author: Mark Lobodzinski <mark@lunarg.com>
* Author: Dave Houlton <daveh@lunarg.com>
* Author: Shannon McPherson <shannon@lunarg.com>
*/
// Allow use of STL min and max functions in Windows
#define NOMINMAX
#include <algorithm>
#include <cmath>
#include <iterator>
#include <memory>
#include <set>
#include <sstream>
#include <string>
#include <unordered_map>
#include <unordered_set>
#include <vector>
#include "vk_enum_string_helper.h"
#include "vk_format_utils.h"
#include "vk_layer_data.h"
#include "vk_layer_utils.h"
#include "vk_layer_logging.h"
#include "vk_typemap_helper.h"
#include "chassis.h"
#include "state_tracker.h"
#include "shader_validation.h"
using std::max;
using std::string;
using std::stringstream;
using std::unique_ptr;
using std::unordered_map;
using std::unordered_set;
using std::vector;
#ifdef VK_USE_PLATFORM_ANDROID_KHR
// Android-specific validation that uses types defined only with VK_USE_PLATFORM_ANDROID_KHR
// This could also move into a separate core_validation_android.cpp file... ?
void ValidationStateTracker::RecordCreateImageANDROID(const VkImageCreateInfo *create_info, IMAGE_STATE *is_node) {
const VkExternalMemoryImageCreateInfo *emici = lvl_find_in_chain<VkExternalMemoryImageCreateInfo>(create_info->pNext);
if (emici && (emici->handleTypes & VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID)) {
is_node->imported_ahb = true;
}
const VkExternalFormatANDROID *ext_fmt_android = lvl_find_in_chain<VkExternalFormatANDROID>(create_info->pNext);
if (ext_fmt_android && (0 != ext_fmt_android->externalFormat)) {
is_node->has_ahb_format = true;
is_node->ahb_format = ext_fmt_android->externalFormat;
}
}
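// Illustrative sketch (app-side code, not part of the layer): the pNext chain RecordCreateImageANDROID
// inspects. An app importing an AHardwareBuffer typically chains both structs off VkImageCreateInfo:
//     VkExternalFormatANDROID ext_fmt = {VK_STRUCTURE_TYPE_EXTERNAL_FORMAT_ANDROID};
//     ext_fmt.externalFormat = ahb_format_props.externalFormat;  // from vkGetAndroidHardwareBufferPropertiesANDROID
//     VkExternalMemoryImageCreateInfo ext_mem = {VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO, &ext_fmt};
//     ext_mem.handleTypes = VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID;
//     VkImageCreateInfo ici = {VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO, &ext_mem};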
void ValidationStateTracker::RecordCreateSamplerYcbcrConversionANDROID(const VkSamplerYcbcrConversionCreateInfo *create_info,
VkSamplerYcbcrConversion ycbcr_conversion) {
const VkExternalFormatANDROID *ext_format_android = lvl_find_in_chain<VkExternalFormatANDROID>(create_info->pNext);
if (ext_format_android && (0 != ext_format_android->externalFormat)) {
ycbcr_conversion_ahb_fmt_map.emplace(ycbcr_conversion, ext_format_android->externalFormat);
}
}
void ValidationStateTracker::RecordDestroySamplerYcbcrConversionANDROID(VkSamplerYcbcrConversion ycbcr_conversion) {
ycbcr_conversion_ahb_fmt_map.erase(ycbcr_conversion);
}
#else
void ValidationStateTracker::RecordCreateImageANDROID(const VkImageCreateInfo *create_info, IMAGE_STATE *is_node) {}
void ValidationStateTracker::RecordCreateSamplerYcbcrConversionANDROID(const VkSamplerYcbcrConversionCreateInfo *create_info,
VkSamplerYcbcrConversion ycbcr_conversion) {}
void ValidationStateTracker::RecordDestroySamplerYcbcrConversionANDROID(VkSamplerYcbcrConversion ycbcr_conversion) {}
#endif // VK_USE_PLATFORM_ANDROID_KHR
void ValidationStateTracker::PostCallRecordCreateImage(VkDevice device, const VkImageCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkImage *pImage, VkResult result) {
if (VK_SUCCESS != result) return;
auto is_node = std::make_shared<IMAGE_STATE>(*pImage, pCreateInfo);
if (device_extensions.vk_android_external_memory_android_hardware_buffer) {
RecordCreateImageANDROID(pCreateInfo, is_node.get());
}
const auto swapchain_info = lvl_find_in_chain<VkImageSwapchainCreateInfoKHR>(pCreateInfo->pNext);
if (swapchain_info) {
is_node->create_from_swapchain = swapchain_info->swapchain;
}
bool pre_fetch_memory_reqs = true;
#ifdef VK_USE_PLATFORM_ANDROID_KHR
if (is_node->has_ahb_format) {
// Do not fetch requirements for external memory images
pre_fetch_memory_reqs = false;
}
#endif
// Record the memory requirements in case they won't be queried
if (pre_fetch_memory_reqs) {
DispatchGetImageMemoryRequirements(device, *pImage, &is_node->requirements);
}
imageMap.insert(std::make_pair(*pImage, std::move(is_node)));
}
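// Illustrative sketch (app-side code, not part of the layer): the VkImageSwapchainCreateInfoKHR chain
// that sets create_from_swapchain above:
//     VkImageSwapchainCreateInfoKHR swap_ci = {VK_STRUCTURE_TYPE_IMAGE_SWAPCHAIN_CREATE_INFO_KHR};
//     swap_ci.swapchain = swapchain;
//     VkImageCreateInfo ici = {VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO, &swap_ci};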
void ValidationStateTracker::PreCallRecordDestroyImage(VkDevice device, VkImage image, const VkAllocationCallbacks *pAllocator) {
if (!image) return;
IMAGE_STATE *image_state = GetImageState(image);
const VulkanTypedHandle obj_struct(image, kVulkanObjectTypeImage);
InvalidateCommandBuffers(image_state->cb_bindings, obj_struct);
// Clean up memory mapping, bindings and range references for image
for (auto mem_binding : image_state->GetBoundMemory()) {
auto mem_info = GetDevMemState(mem_binding);
if (mem_info) {
RemoveImageMemoryRange(image, mem_info);
}
}
if (image_state->bind_swapchain) {
auto swapchain = GetSwapchainState(image_state->bind_swapchain);
if (swapchain) {
swapchain->images[image_state->bind_swapchain_imageIndex].bound_images.erase(image_state->image);
}
}
RemoveAliasingImage(image_state);
ClearMemoryObjectBindings(obj_struct);
image_state->destroyed = true;
// Remove image from imageMap
imageMap.erase(image);
}
void ValidationStateTracker::PreCallRecordCmdClearColorImage(VkCommandBuffer commandBuffer, VkImage image,
VkImageLayout imageLayout, const VkClearColorValue *pColor,
uint32_t rangeCount, const VkImageSubresourceRange *pRanges) {
auto cb_node = GetCBState(commandBuffer);
auto image_state = GetImageState(image);
if (cb_node && image_state) {
AddCommandBufferBindingImage(cb_node, image_state);
}
}
void ValidationStateTracker::PreCallRecordCmdClearDepthStencilImage(VkCommandBuffer commandBuffer, VkImage image,
VkImageLayout imageLayout,
const VkClearDepthStencilValue *pDepthStencil,
uint32_t rangeCount, const VkImageSubresourceRange *pRanges) {
auto cb_node = GetCBState(commandBuffer);
auto image_state = GetImageState(image);
if (cb_node && image_state) {
AddCommandBufferBindingImage(cb_node, image_state);
}
}
void ValidationStateTracker::PreCallRecordCmdCopyImage(VkCommandBuffer commandBuffer, VkImage srcImage,
VkImageLayout srcImageLayout, VkImage dstImage, VkImageLayout dstImageLayout,
uint32_t regionCount, const VkImageCopy *pRegions) {
auto cb_node = GetCBState(commandBuffer);
auto src_image_state = GetImageState(srcImage);
auto dst_image_state = GetImageState(dstImage);
// Update bindings between images and cmd buffer
AddCommandBufferBindingImage(cb_node, src_image_state);
AddCommandBufferBindingImage(cb_node, dst_image_state);
}
void ValidationStateTracker::PreCallRecordCmdResolveImage(VkCommandBuffer commandBuffer, VkImage srcImage,
VkImageLayout srcImageLayout, VkImage dstImage,
VkImageLayout dstImageLayout, uint32_t regionCount,
const VkImageResolve *pRegions) {
auto cb_node = GetCBState(commandBuffer);
auto src_image_state = GetImageState(srcImage);
auto dst_image_state = GetImageState(dstImage);
// Update bindings between images and cmd buffer
AddCommandBufferBindingImage(cb_node, src_image_state);
AddCommandBufferBindingImage(cb_node, dst_image_state);
}
void ValidationStateTracker::PreCallRecordCmdBlitImage(VkCommandBuffer commandBuffer, VkImage srcImage,
VkImageLayout srcImageLayout, VkImage dstImage, VkImageLayout dstImageLayout,
uint32_t regionCount, const VkImageBlit *pRegions, VkFilter filter) {
auto cb_node = GetCBState(commandBuffer);
auto src_image_state = GetImageState(srcImage);
auto dst_image_state = GetImageState(dstImage);
// Update bindings between images and cmd buffer
AddCommandBufferBindingImage(cb_node, src_image_state);
AddCommandBufferBindingImage(cb_node, dst_image_state);
}
void ValidationStateTracker::PostCallRecordCreateBuffer(VkDevice device, const VkBufferCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkBuffer *pBuffer,
VkResult result) {
if (result != VK_SUCCESS) return;
// TODO : This doesn't create a deep copy of pQueueFamilyIndices, so that needs fixing if/when we want that data to be valid
auto buffer_state = std::make_shared<BUFFER_STATE>(*pBuffer, pCreateInfo);
// Prefetch the memory requirements so state is valid even if the app never queries them
DispatchGetBufferMemoryRequirements(device, *pBuffer, &buffer_state->requirements);
bufferMap.insert(std::make_pair(*pBuffer, std::move(buffer_state)));
}
void ValidationStateTracker::PostCallRecordCreateBufferView(VkDevice device, const VkBufferViewCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkBufferView *pView,
VkResult result) {
if (result != VK_SUCCESS) return;
auto buffer_state = GetBufferShared(pCreateInfo->buffer);
bufferViewMap[*pView] = std::make_shared<BUFFER_VIEW_STATE>(buffer_state, *pView, pCreateInfo);
}
void ValidationStateTracker::PostCallRecordCreateImageView(VkDevice device, const VkImageViewCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkImageView *pView,
VkResult result) {
if (result != VK_SUCCESS) return;
auto image_state = GetImageShared(pCreateInfo->image);
imageViewMap[*pView] = std::make_shared<IMAGE_VIEW_STATE>(image_state, *pView, pCreateInfo);
}
void ValidationStateTracker::PreCallRecordCmdCopyBuffer(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkBuffer dstBuffer,
uint32_t regionCount, const VkBufferCopy *pRegions) {
auto cb_node = GetCBState(commandBuffer);
auto src_buffer_state = GetBufferState(srcBuffer);
auto dst_buffer_state = GetBufferState(dstBuffer);
// Update bindings between buffers and cmd buffer
AddCommandBufferBindingBuffer(cb_node, src_buffer_state);
AddCommandBufferBindingBuffer(cb_node, dst_buffer_state);
}
void ValidationStateTracker::PreCallRecordDestroyImageView(VkDevice device, VkImageView imageView,
const VkAllocationCallbacks *pAllocator) {
IMAGE_VIEW_STATE *image_view_state = GetImageViewState(imageView);
if (!image_view_state) return;
const VulkanTypedHandle obj_struct(imageView, kVulkanObjectTypeImageView);
// Any bound cmd buffers are now invalid
InvalidateCommandBuffers(image_view_state->cb_bindings, obj_struct);
image_view_state->destroyed = true;
imageViewMap.erase(imageView);
}
void ValidationStateTracker::PreCallRecordDestroyBuffer(VkDevice device, VkBuffer buffer, const VkAllocationCallbacks *pAllocator) {
if (!buffer) return;
auto buffer_state = GetBufferState(buffer);
const VulkanTypedHandle obj_struct(buffer, kVulkanObjectTypeBuffer);
InvalidateCommandBuffers(buffer_state->cb_bindings, obj_struct);
for (auto mem_binding : buffer_state->GetBoundMemory()) {
auto mem_info = GetDevMemState(mem_binding);
if (mem_info) {
RemoveBufferMemoryRange(buffer, mem_info);
}
}
ClearMemoryObjectBindings(obj_struct);
buffer_state->destroyed = true;
bufferMap.erase(buffer_state->buffer);
}
void ValidationStateTracker::PreCallRecordDestroyBufferView(VkDevice device, VkBufferView bufferView,
const VkAllocationCallbacks *pAllocator) {
if (!bufferView) return;
auto buffer_view_state = GetBufferViewState(bufferView);
const VulkanTypedHandle obj_struct(bufferView, kVulkanObjectTypeBufferView);
// Any bound cmd buffers are now invalid
InvalidateCommandBuffers(buffer_view_state->cb_bindings, obj_struct);
buffer_view_state->destroyed = true;
bufferViewMap.erase(bufferView);
}
void ValidationStateTracker::PreCallRecordCmdFillBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset,
VkDeviceSize size, uint32_t data) {
auto cb_node = GetCBState(commandBuffer);
auto buffer_state = GetBufferState(dstBuffer);
// Update bindings between buffer and cmd buffer
AddCommandBufferBindingBuffer(cb_node, buffer_state);
}
void ValidationStateTracker::PreCallRecordCmdCopyImageToBuffer(VkCommandBuffer commandBuffer, VkImage srcImage,
VkImageLayout srcImageLayout, VkBuffer dstBuffer,
uint32_t regionCount, const VkBufferImageCopy *pRegions) {
auto cb_node = GetCBState(commandBuffer);
auto src_image_state = GetImageState(srcImage);
auto dst_buffer_state = GetBufferState(dstBuffer);
// Update bindings between buffer/image and cmd buffer
AddCommandBufferBindingImage(cb_node, src_image_state);
AddCommandBufferBindingBuffer(cb_node, dst_buffer_state);
}
void ValidationStateTracker::PreCallRecordCmdCopyBufferToImage(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkImage dstImage,
VkImageLayout dstImageLayout, uint32_t regionCount,
const VkBufferImageCopy *pRegions) {
auto cb_node = GetCBState(commandBuffer);
auto src_buffer_state = GetBufferState(srcBuffer);
auto dst_image_state = GetImageState(dstImage);
AddCommandBufferBindingBuffer(cb_node, src_buffer_state);
AddCommandBufferBindingImage(cb_node, dst_image_state);
}
// Get the image viewstate for a given framebuffer attachment
IMAGE_VIEW_STATE *ValidationStateTracker::GetAttachmentImageViewState(FRAMEBUFFER_STATE *framebuffer, uint32_t index) {
assert(framebuffer);
if (framebuffer->createInfo.flags & VK_FRAMEBUFFER_CREATE_IMAGELESS_BIT_KHR) return nullptr;
assert(index < framebuffer->createInfo.attachmentCount);
const VkImageView &image_view = framebuffer->createInfo.pAttachments[index];
return GetImageViewState(image_view);
}
// Get the image viewstate for a given framebuffer attachment
const IMAGE_VIEW_STATE *ValidationStateTracker::GetAttachmentImageViewState(const FRAMEBUFFER_STATE *framebuffer,
uint32_t index) const {
assert(framebuffer);
if (framebuffer->createInfo.flags & VK_FRAMEBUFFER_CREATE_IMAGELESS_BIT_KHR) return nullptr;
assert(index < framebuffer->createInfo.attachmentCount);
const VkImageView &image_view = framebuffer->createInfo.pAttachments[index];
return GetImageViewState(image_view);
}
void ValidationStateTracker::AddAliasingImage(IMAGE_STATE *image_state) {
if (!(image_state->createInfo.flags & VK_IMAGE_CREATE_ALIAS_BIT)) return;
std::unordered_set<VkImage> *bound_images = nullptr;
if (image_state->bind_swapchain) {
auto swapchain_state = GetSwapchainState(image_state->bind_swapchain);
if (swapchain_state) {
bound_images = &swapchain_state->images[image_state->bind_swapchain_imageIndex].bound_images;
}
} else {
auto mem_state = GetDevMemState(image_state->binding.mem);
if (mem_state) {
bound_images = &mem_state->bound_images;
}
}
if (bound_images) {
for (const auto &handle : *bound_images) {
if (handle != image_state->image) {
auto is = GetImageState(handle);
if (is && is->IsCompatibleAliasing(image_state)) {
auto inserted = is->aliasing_images.emplace(image_state->image);
if (inserted.second) {
image_state->aliasing_images.emplace(handle);
}
}
}
}
}
}
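// Example of the aliasing AddAliasingImage tracks (illustrative app-side code): two identically
// created images with VK_IMAGE_CREATE_ALIAS_BIT, bound to the same memory at the same offset,
// become mutual aliases and land in each other's aliasing_images set:
//     vkBindImageMemory(device, image_a, mem, 0);
//     vkBindImageMemory(device, image_b, mem, 0);  // image_a and image_b now alias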
void ValidationStateTracker::RemoveAliasingImage(IMAGE_STATE *image_state) {
for (const auto &image : image_state->aliasing_images) {
auto is = GetImageState(image);
if (is) {
is->aliasing_images.erase(image_state->image);
}
}
image_state->aliasing_images.clear();
}
void ValidationStateTracker::RemoveAliasingImages(const std::unordered_set<VkImage> &bound_images) {
// This is a one-way clear. Because bound_images contains cross references, clearing each image's
// aliasing_images set in a single pass removes every reference; a two-way erase is not needed.
for (const auto &handle : bound_images) {
auto is = GetImageState(handle);
if (is) {
is->aliasing_images.clear();
}
}
}
const EVENT_STATE *ValidationStateTracker::GetEventState(VkEvent event) const {
auto it = eventMap.find(event);
if (it == eventMap.end()) {
return nullptr;
}
return &it->second;
}
EVENT_STATE *ValidationStateTracker::GetEventState(VkEvent event) {
auto it = eventMap.find(event);
if (it == eventMap.end()) {
return nullptr;
}
return &it->second;
}
const QUEUE_STATE *ValidationStateTracker::GetQueueState(VkQueue queue) const {
auto it = queueMap.find(queue);
if (it == queueMap.cend()) {
return nullptr;
}
return &it->second;
}
QUEUE_STATE *ValidationStateTracker::GetQueueState(VkQueue queue) {
auto it = queueMap.find(queue);
if (it == queueMap.end()) {
return nullptr;
}
return &it->second;
}
const PHYSICAL_DEVICE_STATE *ValidationStateTracker::GetPhysicalDeviceState(VkPhysicalDevice phys) const {
auto *phys_dev_map = ((physical_device_map.size() > 0) ? &physical_device_map : &instance_state->physical_device_map);
auto it = phys_dev_map->find(phys);
if (it == phys_dev_map->end()) {
return nullptr;
}
return &it->second;
}
PHYSICAL_DEVICE_STATE *ValidationStateTracker::GetPhysicalDeviceState(VkPhysicalDevice phys) {
auto *phys_dev_map = ((physical_device_map.size() > 0) ? &physical_device_map : &instance_state->physical_device_map);
auto it = phys_dev_map->find(phys);
if (it == phys_dev_map->end()) {
return nullptr;
}
return &it->second;
}
PHYSICAL_DEVICE_STATE *ValidationStateTracker::GetPhysicalDeviceState() { return physical_device_state; }
const PHYSICAL_DEVICE_STATE *ValidationStateTracker::GetPhysicalDeviceState() const { return physical_device_state; }
// Return ptr to memory binding for given handle of specified type
template <typename State, typename Result>
static Result GetObjectMemBindingImpl(State state, const VulkanTypedHandle &typed_handle) {
switch (typed_handle.type) {
case kVulkanObjectTypeImage:
return state->GetImageState(typed_handle.Cast<VkImage>());
case kVulkanObjectTypeBuffer:
return state->GetBufferState(typed_handle.Cast<VkBuffer>());
case kVulkanObjectTypeAccelerationStructureNV:
return state->GetAccelerationStructureState(typed_handle.Cast<VkAccelerationStructureNV>());
default:
break;
}
return nullptr;
}
const BINDABLE *ValidationStateTracker::GetObjectMemBinding(const VulkanTypedHandle &typed_handle) const {
return GetObjectMemBindingImpl<const ValidationStateTracker *, const BINDABLE *>(this, typed_handle);
}
BINDABLE *ValidationStateTracker::GetObjectMemBinding(const VulkanTypedHandle &typed_handle) {
return GetObjectMemBindingImpl<ValidationStateTracker *, BINDABLE *>(this, typed_handle);
}
void ValidationStateTracker::AddMemObjInfo(void *object, const VkDeviceMemory mem, const VkMemoryAllocateInfo *pAllocateInfo) {
assert(object != NULL);
memObjMap[mem] = std::make_shared<DEVICE_MEMORY_STATE>(object, mem, pAllocateInfo);
auto mem_info = memObjMap[mem].get();
auto dedicated = lvl_find_in_chain<VkMemoryDedicatedAllocateInfoKHR>(pAllocateInfo->pNext);
if (dedicated) {
mem_info->is_dedicated = true;
mem_info->dedicated_buffer = dedicated->buffer;
mem_info->dedicated_image = dedicated->image;
}
auto export_info = lvl_find_in_chain<VkExportMemoryAllocateInfo>(pAllocateInfo->pNext);
if (export_info) {
mem_info->is_export = true;
mem_info->export_handle_type_flags = export_info->handleTypes;
}
}
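// Illustrative sketch (app-side code, not part of the layer) of the pNext chain AddMemObjInfo parses
// for dedicated allocations; memoryTypeIndex setup is omitted:
//     VkMemoryDedicatedAllocateInfo dedicated = {VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO};
//     dedicated.image = image;  // or dedicated.buffer, but not both
//     VkMemoryAllocateInfo alloc_info = {VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO, &dedicated};
//     alloc_info.allocationSize = reqs.size;
//     vkAllocateMemory(device, &alloc_info, nullptr, &mem);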
// Create binding link between given sampler and command buffer node
void ValidationStateTracker::AddCommandBufferBindingSampler(CMD_BUFFER_STATE *cb_node, SAMPLER_STATE *sampler_state) {
if (disabled.command_buffer_state) {
return;
}
AddCommandBufferBinding(sampler_state->cb_bindings,
VulkanTypedHandle(sampler_state->sampler, kVulkanObjectTypeSampler, sampler_state), cb_node);
}
// Create binding link between given image node and command buffer node
void ValidationStateTracker::AddCommandBufferBindingImage(CMD_BUFFER_STATE *cb_node, IMAGE_STATE *image_state) {
if (disabled.command_buffer_state) {
return;
}
// Skip binding tracking if this image was created through WSI (swapchain-owned images)
if (image_state->create_from_swapchain == VK_NULL_HANDLE) {
// First update cb binding for image
if (AddCommandBufferBinding(image_state->cb_bindings,
VulkanTypedHandle(image_state->image, kVulkanObjectTypeImage, image_state), cb_node)) {
// Now update CB binding in MemObj mini CB list
for (auto mem_binding : image_state->GetBoundMemory()) {
DEVICE_MEMORY_STATE *pMemInfo = GetDevMemState(mem_binding);
if (pMemInfo) {
// Now update CBInfo's Mem reference list
AddCommandBufferBinding(pMemInfo->cb_bindings,
VulkanTypedHandle(mem_binding, kVulkanObjectTypeDeviceMemory, pMemInfo), cb_node);
}
}
}
}
}
// Create binding link between given image view node and its image with command buffer node
void ValidationStateTracker::AddCommandBufferBindingImageView(CMD_BUFFER_STATE *cb_node, IMAGE_VIEW_STATE *view_state) {
if (disabled.command_buffer_state) {
return;
}
// First add bindings for imageView
if (AddCommandBufferBinding(view_state->cb_bindings,
VulkanTypedHandle(view_state->image_view, kVulkanObjectTypeImageView, view_state), cb_node)) {
// Only need to continue if this is a new item
auto image_state = view_state->image_state.get();
// Add bindings for image within imageView
if (image_state) {
AddCommandBufferBindingImage(cb_node, image_state);
}
}
}
// Create binding link between given buffer node and command buffer node
void ValidationStateTracker::AddCommandBufferBindingBuffer(CMD_BUFFER_STATE *cb_node, BUFFER_STATE *buffer_state) {
if (disabled.command_buffer_state) {
return;
}
// First update cb binding for buffer
if (AddCommandBufferBinding(buffer_state->cb_bindings,
VulkanTypedHandle(buffer_state->buffer, kVulkanObjectTypeBuffer, buffer_state), cb_node)) {
// Now update CB binding in MemObj mini CB list
for (auto mem_binding : buffer_state->GetBoundMemory()) {
DEVICE_MEMORY_STATE *pMemInfo = GetDevMemState(mem_binding);
if (pMemInfo) {
// Now update CBInfo's Mem reference list
AddCommandBufferBinding(pMemInfo->cb_bindings,
VulkanTypedHandle(mem_binding, kVulkanObjectTypeDeviceMemory, pMemInfo), cb_node);
}
}
}
}
// Create binding link between given buffer view node and its buffer with command buffer node
void ValidationStateTracker::AddCommandBufferBindingBufferView(CMD_BUFFER_STATE *cb_node, BUFFER_VIEW_STATE *view_state) {
if (disabled.command_buffer_state) {
return;
}
// First add bindings for bufferView
if (AddCommandBufferBinding(view_state->cb_bindings,
VulkanTypedHandle(view_state->buffer_view, kVulkanObjectTypeBufferView, view_state), cb_node)) {
auto buffer_state = view_state->buffer_state.get();
// Add bindings for buffer within bufferView
if (buffer_state) {
AddCommandBufferBindingBuffer(cb_node, buffer_state);
}
}
}
// Create binding link between given acceleration structure and command buffer node
void ValidationStateTracker::AddCommandBufferBindingAccelerationStructure(CMD_BUFFER_STATE *cb_node,
ACCELERATION_STRUCTURE_STATE *as_state) {
if (disabled.command_buffer_state) {
return;
}
if (AddCommandBufferBinding(
as_state->cb_bindings,
VulkanTypedHandle(as_state->acceleration_structure, kVulkanObjectTypeAccelerationStructureNV, as_state), cb_node)) {
// Now update CB binding in MemObj mini CB list
for (auto mem_binding : as_state->GetBoundMemory()) {
DEVICE_MEMORY_STATE *pMemInfo = GetDevMemState(mem_binding);
if (pMemInfo) {
// Now update CBInfo's Mem reference list
AddCommandBufferBinding(pMemInfo->cb_bindings,
VulkanTypedHandle(mem_binding, kVulkanObjectTypeDeviceMemory, pMemInfo), cb_node);
}
}
}
}
// Clear a single object binding from given memory object
void ValidationStateTracker::ClearMemoryObjectBinding(const VulkanTypedHandle &typed_handle, VkDeviceMemory mem) {
DEVICE_MEMORY_STATE *mem_info = GetDevMemState(mem);
// This obj is bound to a memory object. Remove the reference to this object in that memory object's list
if (mem_info) {
mem_info->obj_bindings.erase(typed_handle);
}
}
// ClearMemoryObjectBindings clears the binding of objects to memory
// For the given object it pulls the memory bindings and makes sure that the bindings
// no longer refer to the object being cleared. This occurs when objects are destroyed.
void ValidationStateTracker::ClearMemoryObjectBindings(const VulkanTypedHandle &typed_handle) {
BINDABLE *mem_binding = GetObjectMemBinding(typed_handle);
if (mem_binding) {
if (!mem_binding->sparse) {
ClearMemoryObjectBinding(typed_handle, mem_binding->binding.mem);
} else { // Sparse, clear all bindings
for (auto &sparse_mem_binding : mem_binding->sparse_bindings) {
ClearMemoryObjectBinding(typed_handle, sparse_mem_binding.mem);
}
}
}
}
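// Worked example (illustrative): a sparsely bound buffer with three VkSparseMemoryBind ranges has
// three entries in sparse_bindings, so destroying it erases its typed handle from each of the
// three corresponding DEVICE_MEMORY_STATE::obj_bindings sets.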
// SetMemBinding is used to establish immutable, non-sparse binding between a single image/buffer object and memory object.
// Corresponding valid usage checks are in ValidateSetMemBinding().
void ValidationStateTracker::SetMemBinding(VkDeviceMemory mem, BINDABLE *mem_binding, VkDeviceSize memory_offset,
const VulkanTypedHandle &typed_handle) {
assert(mem_binding);
mem_binding->binding.mem = mem;
mem_binding->UpdateBoundMemorySet(); // force recreation of cached set
mem_binding->binding.offset = memory_offset;
mem_binding->binding.size = mem_binding->requirements.size;
if (mem != VK_NULL_HANDLE) {
DEVICE_MEMORY_STATE *mem_info = GetDevMemState(mem);
if (mem_info) {
mem_info->obj_bindings.insert(typed_handle);
// For image objects, make sure default memory state is correctly set
// TODO : What's the best/correct way to handle this?
if (kVulkanObjectTypeImage == typed_handle.type) {
auto const image_state = reinterpret_cast<const IMAGE_STATE *>(mem_binding);
if (image_state) {
VkImageCreateInfo ici = image_state->createInfo;
if (ici.usage & (VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT)) {
// TODO:: More memory state transition stuff.
}
}
}
}
}
}
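// Illustrative call path (assumption; the Record hooks live elsewhere in this file): a successful
// vkBindImageMemory(device, image, mem, offset) is recorded by calling
// SetMemBinding(mem, image_state, offset, VulkanTypedHandle(image, kVulkanObjectTypeImage)).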
// For the NULL mem case, clear any previous binding. Otherwise:
//  Make sure the given object is in its object map
//  If a previous binding existed, update the binding
//  Add a reference from the objectInfo to the memoryInfo
//  Add a reference off of the object's binding info
// Return VK_TRUE if the addition is successful, VK_FALSE otherwise
bool ValidationStateTracker::SetSparseMemBinding(MEM_BINDING binding, const VulkanTypedHandle &typed_handle) {
bool skip = false;
// Handle NULL case separately, just clear previous binding & decrement reference
if (binding.mem == VK_NULL_HANDLE) {
// TODO : This should cause the range of the resource to be unbound according to spec
} else {
BINDABLE *mem_binding = GetObjectMemBinding(typed_handle);
assert(mem_binding);
if (mem_binding) { // Invalid handles are reported by object tracker, but Get returns NULL for them, so avoid SEGV here
assert(mem_binding->sparse);
DEVICE_MEMORY_STATE *mem_info = GetDevMemState(binding.mem);
if (mem_info) {
mem_info->obj_bindings.insert(typed_handle);
// Need to set mem binding for this object
mem_binding->sparse_bindings.insert(binding);
mem_binding->UpdateBoundMemorySet();
}
}
}
return skip;
}
void ValidationStateTracker::UpdateDrawState(CMD_BUFFER_STATE *cb_state, const VkPipelineBindPoint bind_point) {
auto &state = cb_state->lastBound[bind_point];
PIPELINE_STATE *pPipe = state.pipeline_state;
if (VK_NULL_HANDLE != state.pipeline_layout) {
for (const auto &set_binding_pair : pPipe->active_slots) {
uint32_t setIndex = set_binding_pair.first;
// Pull the set node
cvdescriptorset::DescriptorSet *descriptor_set = state.per_set[setIndex].bound_descriptor_set;
if (!descriptor_set->IsPushDescriptor()) {
// For the "bindless" style resource usage with many descriptors, need to optimize command <-> descriptor binding
// TODO: If recreating the reduced_map here shows up in profilinging, need to find a way of sharing with the
// Validate pass. Though in the case of "many" descriptors, typically the descriptor count >> binding count
cvdescriptorset::PrefilterBindRequestMap reduced_map(*descriptor_set, set_binding_pair.second);
const auto &binding_req_map = reduced_map.FilteredMap(*cb_state, *pPipe);
if (reduced_map.IsManyDescriptors()) {
// Only update validate binding tags if we meet the "many" criteria in the Prefilter class
descriptor_set->UpdateValidationCache(*cb_state, *pPipe, binding_req_map);
}
// We can skip updating the state if "nothing" has changed since the last validation.
// See CoreChecks::ValidateCmdBufDrawState for more details.
bool descriptor_set_changed =
!reduced_map.IsManyDescriptors() ||
// Update if descriptor set (or contents) has changed
state.per_set[setIndex].validated_set != descriptor_set ||
state.per_set[setIndex].validated_set_change_count != descriptor_set->GetChangeCount() ||
(!disabled.image_layout_validation &&
state.per_set[setIndex].validated_set_image_layout_change_count != cb_state->image_layout_change_count);
bool need_update = descriptor_set_changed ||
// Update if previous bindingReqMap doesn't include new bindingReqMap
!std::includes(state.per_set[setIndex].validated_set_binding_req_map.begin(),
state.per_set[setIndex].validated_set_binding_req_map.end(),
binding_req_map.begin(), binding_req_map.end());
if (need_update) {
// Bind this set and its active descriptor resources to the command buffer
if (!descriptor_set_changed && reduced_map.IsManyDescriptors()) {
// Only record the bindings that haven't already been recorded
BindingReqMap delta_reqs;
std::set_difference(binding_req_map.begin(), binding_req_map.end(),
state.per_set[setIndex].validated_set_binding_req_map.begin(),
state.per_set[setIndex].validated_set_binding_req_map.end(),
std::inserter(delta_reqs, delta_reqs.begin()));
descriptor_set->UpdateDrawState(this, cb_state, pPipe, delta_reqs);
} else {
descriptor_set->UpdateDrawState(this, cb_state, pPipe, binding_req_map);
}
state.per_set[setIndex].validated_set = descriptor_set;
state.per_set[setIndex].validated_set_change_count = descriptor_set->GetChangeCount();
state.per_set[setIndex].validated_set_image_layout_change_count = cb_state->image_layout_change_count;
if (reduced_map.IsManyDescriptors()) {
// Check whether old == new before assigning, the equality check is much cheaper than
// freeing and reallocating the map.
if (state.per_set[setIndex].validated_set_binding_req_map != set_binding_pair.second) {
state.per_set[setIndex].validated_set_binding_req_map = set_binding_pair.second;
}
} else {
state.per_set[setIndex].validated_set_binding_req_map = BindingReqMap();
}
}
}
}
}
if (!pPipe->vertex_binding_descriptions_.empty()) {
cb_state->vertex_buffer_used = true;
}
}
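// Sketch of the incremental update above (illustrative values): if the previously validated
// binding-req map covered bindings {0, 1} and the new map needs {0, 1, 2}, std::includes fails;
// on the "many descriptors" path std::set_difference yields the delta {2}, so only binding 2
// is re-recorded into the command buffer state.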
// Remove set from setMap and delete the set
void ValidationStateTracker::FreeDescriptorSet(cvdescriptorset::DescriptorSet *descriptor_set) {
descriptor_set->destroyed = true;
const VulkanTypedHandle obj_struct(descriptor_set->GetSet(), kVulkanObjectTypeDescriptorSet);
// Any bound cmd buffers are now invalid
InvalidateCommandBuffers(descriptor_set->cb_bindings, obj_struct);
setMap.erase(descriptor_set->GetSet());
}
// Free all DS Pools including their Sets & related sub-structs
// NOTE : Calls to this function should be guarded by a mutex
void ValidationStateTracker::DeleteDescriptorSetPools() {
for (auto ii = descriptorPoolMap.begin(); ii != descriptorPoolMap.end();) {
// Remove this pools' sets from setMap and delete them
for (auto ds : ii->second->sets) {
FreeDescriptorSet(ds);
}
ii->second->sets.clear();
ii = descriptorPoolMap.erase(ii);
}
}
// For a given object struct, return a BASE_NODE-typed pointer to its wrapping state struct
BASE_NODE *ValidationStateTracker::GetStateStructPtrFromObject(const VulkanTypedHandle &object_struct) {
if (object_struct.node) {
#ifdef _DEBUG
// assert that lookup would find the same object
VulkanTypedHandle other = object_struct;
other.node = nullptr;
assert(object_struct.node == GetStateStructPtrFromObject(other));
#endif
return object_struct.node;
}
BASE_NODE *base_ptr = nullptr;
switch (object_struct.type) {
case kVulkanObjectTypeDescriptorSet: {
base_ptr = GetSetNode(object_struct.Cast<VkDescriptorSet>());
break;
}
case kVulkanObjectTypeSampler: {
base_ptr = GetSamplerState(object_struct.Cast<VkSampler>());
break;
}
case kVulkanObjectTypeQueryPool: {
base_ptr = GetQueryPoolState(object_struct.Cast<VkQueryPool>());
break;
}
case kVulkanObjectTypePipeline: {
base_ptr = GetPipelineState(object_struct.Cast<VkPipeline>());
break;
}
case kVulkanObjectTypeBuffer: {
base_ptr = GetBufferState(object_struct.Cast<VkBuffer>());
break;
}
case kVulkanObjectTypeBufferView: {
base_ptr = GetBufferViewState(object_struct.Cast<VkBufferView>());
break;
}
case kVulkanObjectTypeImage: {
base_ptr = GetImageState(object_struct.Cast<VkImage>());
break;
}
case kVulkanObjectTypeImageView: {
base_ptr = GetImageViewState(object_struct.Cast<VkImageView>());
break;
}
case kVulkanObjectTypeEvent: {
base_ptr = GetEventState(object_struct.Cast<VkEvent>());
break;
}
case kVulkanObjectTypeDescriptorPool: {
base_ptr = GetDescriptorPoolState(object_struct.Cast<VkDescriptorPool>());
break;
}
case kVulkanObjectTypeCommandPool: {
base_ptr = GetCommandPoolState(object_struct.Cast<VkCommandPool>());
break;
}
case kVulkanObjectTypeFramebuffer: {
base_ptr = GetFramebufferState(object_struct.Cast<VkFramebuffer>());
break;
}
case kVulkanObjectTypeRenderPass: {
base_ptr = GetRenderPassState(object_struct.Cast<VkRenderPass>());
break;
}
case kVulkanObjectTypeDeviceMemory: {
base_ptr = GetDevMemState(object_struct.Cast<VkDeviceMemory>());
break;
}
case kVulkanObjectTypeAccelerationStructureNV: {
base_ptr = GetAccelerationStructureState(object_struct.Cast<VkAccelerationStructureNV>());
break;
}
case kVulkanObjectTypeUnknown:
// This can happen if an element of the object_bindings vector has been
// zeroed out, after an object is destroyed.
break;
default:
// TODO : Any other objects to be handled here?
assert(0);
break;
}
return base_ptr;
}
// Tie the VulkanTypedHandle to the cmd buffer, which includes:
//  Adding the object_binding to the cmd buffer
//  Adding the cb_binding to the object
bool ValidationStateTracker::AddCommandBufferBinding(small_unordered_map<CMD_BUFFER_STATE *, int, 8> &cb_bindings,
const VulkanTypedHandle &obj, CMD_BUFFER_STATE *cb_node) {
if (disabled.command_buffer_state) {
return false;
}
// Insert the cb_binding with a default 'index' of -1. Then push the obj into the object_bindings
// vector, and update cb_bindings[cb_node] with the index of that element of the vector.
auto inserted = cb_bindings.insert({cb_node, -1});
if (inserted.second) {
cb_node->object_bindings.push_back(obj);
inserted.first->second = (int)cb_node->object_bindings.size() - 1;
return true;
}
return false;
}
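// Worked example (illustrative): if cb_node->object_bindings already holds 3 entries, a
// first-time insert for obj appends it at index 3 and stores cb_bindings[cb_node] = 3, so a
// later removal can locate the object_bindings slot without a linear search.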
// For a given object, if cb_node is in that object's cb_bindings, remove cb_node
void ValidationStateTracker::RemoveCommandBufferBinding(VulkanTypedHandle const &object, CMD_BUFFER_STATE *cb_node) {
BASE_NODE *base_obj = GetStateStructPtrFromObject(object);
if (base_obj) base_obj->cb_bindings.erase(cb_node);
}
// Reset the command buffer state
// Maintain the createInfo and set state to CB_NEW, but clear all other state
void ValidationStateTracker::ResetCommandBufferState(const VkCommandBuffer cb) {
CMD_BUFFER_STATE *pCB = GetCBState(cb);
if (pCB) {
pCB->in_use.store(0);
// Reset CB state (note that createInfo is not cleared)
pCB->commandBuffer = cb;
memset(&pCB->beginInfo, 0, sizeof(VkCommandBufferBeginInfo));
memset(&pCB->inheritanceInfo, 0, sizeof(VkCommandBufferInheritanceInfo));
pCB->hasDrawCmd = false;
pCB->hasTraceRaysCmd = false;
pCB->hasBuildAccelerationStructureCmd = false;
pCB->hasDispatchCmd = false;
pCB->state = CB_NEW;
pCB->commandCount = 0;
pCB->submitCount = 0;
pCB->image_layout_change_count = 1; // Start at 1. 0 is insert value for validation cache versions, s.t. new == dirty
pCB->status = 0;
pCB->static_status = 0;
pCB->viewportMask = 0;
pCB->scissorMask = 0;
for (auto &item : pCB->lastBound) {
item.second.reset();
}
memset(&pCB->activeRenderPassBeginInfo, 0, sizeof(pCB->activeRenderPassBeginInfo));
pCB->activeRenderPass = nullptr;
pCB->activeSubpassContents = VK_SUBPASS_CONTENTS_INLINE;
pCB->activeSubpass = 0;
pCB->broken_bindings.clear();
pCB->waitedEvents.clear();
pCB->events.clear();
pCB->writeEventsBeforeWait.clear();
pCB->activeQueries.clear();
pCB->startedQueries.clear();
pCB->image_layout_map.clear();
pCB->current_vertex_buffer_binding_info.vertex_buffer_bindings.clear();
pCB->vertex_buffer_used = false;
pCB->primaryCommandBuffer = VK_NULL_HANDLE;
// If secondary, invalidate any primary command buffer that may call us.
if (pCB->createInfo.level == VK_COMMAND_BUFFER_LEVEL_SECONDARY) {
InvalidateLinkedCommandBuffers(pCB->linkedCommandBuffers, VulkanTypedHandle(cb, kVulkanObjectTypeCommandBuffer));
}
// Remove reverse command buffer links.
for (auto pSubCB : pCB->linkedCommandBuffers) {
pSubCB->linkedCommandBuffers.erase(pCB);
}
pCB->linkedCommandBuffers.clear();
pCB->queue_submit_functions.clear();
pCB->cmd_execute_commands_functions.clear();
pCB->eventUpdates.clear();
pCB->queryUpdates.clear();
// Remove object bindings
for (const auto &obj : pCB->object_bindings) {
RemoveCommandBufferBinding(obj, pCB);
}
pCB->object_bindings.clear();
// Remove this cmdBuffer's reference from each FrameBuffer's CB ref list
for (auto framebuffer : pCB->framebuffers) {
auto fb_state = GetFramebufferState(framebuffer);
if (fb_state) fb_state->cb_bindings.erase(pCB);
}
pCB->framebuffers.clear();
pCB->activeFramebuffer = VK_NULL_HANDLE;
memset(&pCB->index_buffer_binding, 0, sizeof(pCB->index_buffer_binding));
pCB->qfo_transfer_image_barriers.Reset();
pCB->qfo_transfer_buffer_barriers.Reset();
// Clean up the label data
ResetCmdDebugUtilsLabel(report_data, pCB->commandBuffer);
pCB->debug_label.Reset();
pCB->validate_descriptorsets_in_queuesubmit.clear();
}
if (command_buffer_reset_callback) {
(*command_buffer_reset_callback)(cb);
}
}
void ValidationStateTracker::PostCallRecordCreateDevice(VkPhysicalDevice gpu, const VkDeviceCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkDevice *pDevice,
VkResult result) {
if (VK_SUCCESS != result) return;
const VkPhysicalDeviceFeatures *enabled_features_found = pCreateInfo->pEnabledFeatures;
if (nullptr == enabled_features_found) {
const auto *features2 = lvl_find_in_chain<VkPhysicalDeviceFeatures2KHR>(pCreateInfo->pNext);
if (features2) {
enabled_features_found = &(features2->features);
}
}
ValidationObject *device_object = GetLayerDataPtr(get_dispatch_key(*pDevice), layer_data_map);
ValidationObject *validation_data = GetValidationObject(device_object->object_dispatch, this->container_type);
ValidationStateTracker *state_tracker = static_cast<ValidationStateTracker *>(validation_data);
if (nullptr == enabled_features_found) {
state_tracker->enabled_features.core = {};
} else {
state_tracker->enabled_features.core = *enabled_features_found;
}
// Make sure that queue_family_properties are obtained for this device's physical_device, even if the app has not
// previously set them through an explicit API call.
uint32_t count;
auto pd_state = GetPhysicalDeviceState(gpu);
DispatchGetPhysicalDeviceQueueFamilyProperties(gpu, &count, nullptr);
pd_state->queue_family_properties.resize(std::max(static_cast<uint32_t>(pd_state->queue_family_properties.size()), count));
DispatchGetPhysicalDeviceQueueFamilyProperties(gpu, &count, &pd_state->queue_family_properties[0]);
// Save local link to this device's physical device state
state_tracker->physical_device_state = pd_state;
const auto *device_group_ci = lvl_find_in_chain<VkDeviceGroupDeviceCreateInfo>(pCreateInfo->pNext);
state_tracker->physical_device_count =
device_group_ci && device_group_ci->physicalDeviceCount > 0 ? device_group_ci->physicalDeviceCount : 1;
const auto *descriptor_indexing_features = lvl_find_in_chain<VkPhysicalDeviceDescriptorIndexingFeaturesEXT>(pCreateInfo->pNext);
if (descriptor_indexing_features) {
state_tracker->enabled_features.descriptor_indexing = *descriptor_indexing_features;
}
const auto *eight_bit_storage_features = lvl_find_in_chain<VkPhysicalDevice8BitStorageFeaturesKHR>(pCreateInfo->pNext);
if (eight_bit_storage_features) {
state_tracker->enabled_features.eight_bit_storage = *eight_bit_storage_features;
}
const auto *exclusive_scissor_features = lvl_find_in_chain<VkPhysicalDeviceExclusiveScissorFeaturesNV>(pCreateInfo->pNext);
if (exclusive_scissor_features) {
state_tracker->enabled_features.exclusive_scissor = *exclusive_scissor_features;
}
const auto *shading_rate_image_features = lvl_find_in_chain<VkPhysicalDeviceShadingRateImageFeaturesNV>(pCreateInfo->pNext);
if (shading_rate_image_features) {
state_tracker->enabled_features.shading_rate_image = *shading_rate_image_features;
}
const auto *mesh_shader_features = lvl_find_in_chain<VkPhysicalDeviceMeshShaderFeaturesNV>(pCreateInfo->pNext);
if (mesh_shader_features) {
state_tracker->enabled_features.mesh_shader = *mesh_shader_features;
}
const auto *inline_uniform_block_features =
lvl_find_in_chain<VkPhysicalDeviceInlineUniformBlockFeaturesEXT>(pCreateInfo->pNext);
if (inline_uniform_block_features) {
state_tracker->enabled_features.inline_uniform_block = *inline_uniform_block_features;
}
const auto *transform_feedback_features = lvl_find_in_chain<VkPhysicalDeviceTransformFeedbackFeaturesEXT>(pCreateInfo->pNext);
if (transform_feedback_features) {
state_tracker->enabled_features.transform_feedback_features = *transform_feedback_features;
}
const auto *float16_int8_features = lvl_find_in_chain<VkPhysicalDeviceFloat16Int8FeaturesKHR>(pCreateInfo->pNext);
if (float16_int8_features) {
state_tracker->enabled_features.float16_int8 = *float16_int8_features;
}
const auto *vtx_attrib_div_features = lvl_find_in_chain<VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT>(pCreateInfo->pNext);
if (vtx_attrib_div_features) {
state_tracker->enabled_features.vtx_attrib_divisor_features = *vtx_attrib_div_features;
}
const auto *uniform_buffer_standard_layout_features =
lvl_find_in_chain<VkPhysicalDeviceUniformBufferStandardLayoutFeaturesKHR>(pCreateInfo->pNext);
if (uniform_buffer_standard_layout_features) {
state_tracker->enabled_features.uniform_buffer_standard_layout = *uniform_buffer_standard_layout_features;
}
const auto *scalar_block_layout_features = lvl_find_in_chain<VkPhysicalDeviceScalarBlockLayoutFeaturesEXT>(pCreateInfo->pNext);
if (scalar_block_layout_features) {
state_tracker->enabled_features.scalar_block_layout_features = *scalar_block_layout_features;
}
const auto *buffer_device_address = lvl_find_in_chain<VkPhysicalDeviceBufferDeviceAddressFeaturesKHR>(pCreateInfo->pNext);
if (buffer_device_address) {
state_tracker->enabled_features.buffer_device_address = *buffer_device_address;
}
const auto *buffer_device_address_ext = lvl_find_in_chain<VkPhysicalDeviceBufferDeviceAddressFeaturesEXT>(pCreateInfo->pNext);
if (buffer_device_address_ext) {
state_tracker->enabled_features.buffer_device_address_ext = *buffer_device_address_ext;
}
const auto *cooperative_matrix_features = lvl_find_in_chain<VkPhysicalDeviceCooperativeMatrixFeaturesNV>(pCreateInfo->pNext);
if (cooperative_matrix_features) {
state_tracker->enabled_features.cooperative_matrix_features = *cooperative_matrix_features;
}
const auto *host_query_reset_features = lvl_find_in_chain<VkPhysicalDeviceHostQueryResetFeaturesEXT>(pCreateInfo->pNext);
if (host_query_reset_features) {
state_tracker->enabled_features.host_query_reset_features = *host_query_reset_features;
}
const auto *compute_shader_derivatives_features =
lvl_find_in_chain<VkPhysicalDeviceComputeShaderDerivativesFeaturesNV>(pCreateInfo->pNext);
if (compute_shader_derivatives_features) {
state_tracker->enabled_features.compute_shader_derivatives_features = *compute_shader_derivatives_features;
}
const auto *fragment_shader_barycentric_features =
lvl_find_in_chain<VkPhysicalDeviceFragmentShaderBarycentricFeaturesNV>(pCreateInfo->pNext);
if (fragment_shader_barycentric_features) {
state_tracker->enabled_features.fragment_shader_barycentric_features = *fragment_shader_barycentric_features;
}
const auto *shader_image_footprint_features =
lvl_find_in_chain<VkPhysicalDeviceShaderImageFootprintFeaturesNV>(pCreateInfo->pNext);
if (shader_image_footprint_features) {
state_tracker->enabled_features.shader_image_footprint_features = *shader_image_footprint_features;
}
const auto *fragment_shader_interlock_features =
lvl_find_in_chain<VkPhysicalDeviceFragmentShaderInterlockFeaturesEXT>(pCreateInfo->pNext);
if (fragment_shader_interlock_features) {
state_tracker->enabled_features.fragment_shader_interlock_features = *fragment_shader_interlock_features;
}
const auto *demote_to_helper_invocation_features =
lvl_find_in_chain<VkPhysicalDeviceShaderDemoteToHelperInvocationFeaturesEXT>(pCreateInfo->pNext);
if (demote_to_helper_invocation_features) {
state_tracker->enabled_features.demote_to_helper_invocation_features = *demote_to_helper_invocation_features;
}
const auto *texel_buffer_alignment_features =
lvl_find_in_chain<VkPhysicalDeviceTexelBufferAlignmentFeaturesEXT>(pCreateInfo->pNext);
if (texel_buffer_alignment_features) {
state_tracker->enabled_features.texel_buffer_alignment_features = *texel_buffer_alignment_features;
}
const auto *imageless_framebuffer_features =
lvl_find_in_chain<VkPhysicalDeviceImagelessFramebufferFeaturesKHR>(pCreateInfo->pNext);
if (imageless_framebuffer_features) {
state_tracker->enabled_features.imageless_framebuffer_features = *imageless_framebuffer_features;
}
const auto *pipeline_exe_props_features =
lvl_find_in_chain<VkPhysicalDevicePipelineExecutablePropertiesFeaturesKHR>(pCreateInfo->pNext);
if (pipeline_exe_props_features) {
state_tracker->enabled_features.pipeline_exe_props_features = *pipeline_exe_props_features;
}
const auto *dedicated_allocation_image_aliasing_features =
lvl_find_in_chain<VkPhysicalDeviceDedicatedAllocationImageAliasingFeaturesNV>(pCreateInfo->pNext);
if (dedicated_allocation_image_aliasing_features) {
state_tracker->enabled_features.dedicated_allocation_image_aliasing_features =
*dedicated_allocation_image_aliasing_features;
}
const auto *subgroup_extended_types_features =
lvl_find_in_chain<VkPhysicalDeviceShaderSubgroupExtendedTypesFeaturesKHR>(pCreateInfo->pNext);
if (subgroup_extended_types_features) {
state_tracker->enabled_features.subgroup_extended_types_features = *subgroup_extended_types_features;
}
const auto *separate_depth_stencil_layouts_features =
lvl_find_in_chain<VkPhysicalDeviceSeparateDepthStencilLayoutsFeaturesKHR>(pCreateInfo->pNext);
if (separate_depth_stencil_layouts_features) {
state_tracker->enabled_features.separate_depth_stencil_layouts_features = *separate_depth_stencil_layouts_features;
}
const auto *performance_query_features = lvl_find_in_chain<VkPhysicalDevicePerformanceQueryFeaturesKHR>(pCreateInfo->pNext);
if (performance_query_features) {
state_tracker->enabled_features.performance_query_features = *performance_query_features;
}
const auto *timeline_semaphore_features = lvl_find_in_chain<VkPhysicalDeviceTimelineSemaphoreFeaturesKHR>(pCreateInfo->pNext);
if (timeline_semaphore_features) {
state_tracker->enabled_features.timeline_semaphore_features = *timeline_semaphore_features;
}
const auto *device_coherent_memory_features = lvl_find_in_chain<VkPhysicalDeviceCoherentMemoryFeaturesAMD>(pCreateInfo->pNext);
if (device_coherent_memory_features) {
state_tracker->enabled_features.device_coherent_memory_features = *device_coherent_memory_features;
}
// Store physical device properties and physical device mem limits into CoreChecks structs
DispatchGetPhysicalDeviceMemoryProperties(gpu, &state_tracker->phys_dev_mem_props);
DispatchGetPhysicalDeviceProperties(gpu, &state_tracker->phys_dev_props);
const auto &dev_ext = state_tracker->device_extensions;
auto *phys_dev_props = &state_tracker->phys_dev_ext_props;
if (dev_ext.vk_khr_push_descriptor) {
// Get the needed push_descriptor limits
VkPhysicalDevicePushDescriptorPropertiesKHR push_descriptor_prop;
GetPhysicalDeviceExtProperties(gpu, dev_ext.vk_khr_push_descriptor, &push_descriptor_prop);
phys_dev_props->max_push_descriptors = push_descriptor_prop.maxPushDescriptors;
}
GetPhysicalDeviceExtProperties(gpu, dev_ext.vk_ext_descriptor_indexing, &phys_dev_props->descriptor_indexing_props);
GetPhysicalDeviceExtProperties(gpu, dev_ext.vk_nv_shading_rate_image, &phys_dev_props->shading_rate_image_props);
GetPhysicalDeviceExtProperties(gpu, dev_ext.vk_nv_mesh_shader, &phys_dev_props->mesh_shader_props);
GetPhysicalDeviceExtProperties(gpu, dev_ext.vk_ext_inline_uniform_block, &phys_dev_props->inline_uniform_block_props);
GetPhysicalDeviceExtProperties(gpu, dev_ext.vk_ext_vertex_attribute_divisor, &phys_dev_props->vtx_attrib_divisor_props);
GetPhysicalDeviceExtProperties(gpu, dev_ext.vk_khr_depth_stencil_resolve, &phys_dev_props->depth_stencil_resolve_props);
GetPhysicalDeviceExtProperties(gpu, dev_ext.vk_ext_transform_feedback, &phys_dev_props->transform_feedback_props);
GetPhysicalDeviceExtProperties(gpu, dev_ext.vk_nv_ray_tracing, &phys_dev_props->ray_tracing_props);
GetPhysicalDeviceExtProperties(gpu, dev_ext.vk_ext_texel_buffer_alignment, &phys_dev_props->texel_buffer_alignment_props);
GetPhysicalDeviceExtProperties(gpu, dev_ext.vk_ext_fragment_density_map, &phys_dev_props->fragment_density_map_props);
GetPhysicalDeviceExtProperties(gpu, dev_ext.vk_khr_performance_query, &phys_dev_props->performance_query_props);
GetPhysicalDeviceExtProperties(gpu, dev_ext.vk_khr_timeline_semaphore, &phys_dev_props->timeline_semaphore_props);
if (state_tracker->device_extensions.vk_nv_cooperative_matrix) {
// Get the needed cooperative_matrix properties
auto cooperative_matrix_props = lvl_init_struct<VkPhysicalDeviceCooperativeMatrixPropertiesNV>();
auto prop2 = lvl_init_struct<VkPhysicalDeviceProperties2KHR>(&cooperative_matrix_props);
instance_dispatch_table.GetPhysicalDeviceProperties2KHR(gpu, &prop2);
state_tracker->phys_dev_ext_props.cooperative_matrix_props = cooperative_matrix_props;
uint32_t numCooperativeMatrixProperties = 0;
instance_dispatch_table.GetPhysicalDeviceCooperativeMatrixPropertiesNV(gpu, &numCooperativeMatrixProperties, NULL);
state_tracker->cooperative_matrix_properties.resize(numCooperativeMatrixProperties,
lvl_init_struct<VkCooperativeMatrixPropertiesNV>());
instance_dispatch_table.GetPhysicalDeviceCooperativeMatrixPropertiesNV(gpu, &numCooperativeMatrixProperties,
state_tracker->cooperative_matrix_properties.data());
}
if (state_tracker->api_version >= VK_API_VERSION_1_1) {
// Get the needed subgroup limits
auto subgroup_prop = lvl_init_struct<VkPhysicalDeviceSubgroupProperties>();
auto prop2 = lvl_init_struct<VkPhysicalDeviceProperties2KHR>(&subgroup_prop);
instance_dispatch_table.GetPhysicalDeviceProperties2(gpu, &prop2);
state_tracker->phys_dev_ext_props.subgroup_props = subgroup_prop;
}
// Store queue family data
if (pCreateInfo->pQueueCreateInfos != nullptr) {
for (uint32_t i = 0; i < pCreateInfo->queueCreateInfoCount; ++i) {
state_tracker->queue_family_index_map.insert(
std::make_pair(pCreateInfo->pQueueCreateInfos[i].queueFamilyIndex, pCreateInfo->pQueueCreateInfos[i].queueCount));
}
}
}
void ValidationStateTracker::PreCallRecordDestroyDevice(VkDevice device, const VkAllocationCallbacks *pAllocator) {
if (!device) return;
// Reset all command buffers before destroying them, to unlink object_bindings.
for (auto &commandBuffer : commandBufferMap) {
ResetCommandBufferState(commandBuffer.first);
}
pipelineMap.clear();
renderPassMap.clear();
commandBufferMap.clear();
// This will also delete all sets in the pool & remove them from setMap
DeleteDescriptorSetPools();
// All sets should be removed
assert(setMap.empty());
descriptorSetLayoutMap.clear();
imageViewMap.clear();
imageMap.clear();
bufferViewMap.clear();
bufferMap.clear();
// Queues persist until device is destroyed
queueMap.clear();
}
// Loop through bound objects and increment their in_use counts.
void ValidationStateTracker::IncrementBoundObjects(CMD_BUFFER_STATE const *cb_node) {
for (auto obj : cb_node->object_bindings) {
auto base_obj = GetStateStructPtrFromObject(obj);
if (base_obj) {
base_obj->in_use.fetch_add(1);
}
}
}
// Track which resources are in-flight by atomically incrementing their "in_use" count
void ValidationStateTracker::IncrementResources(CMD_BUFFER_STATE *cb_node) {
cb_node->submitCount++;
cb_node->in_use.fetch_add(1);
// First Increment for all "generic" objects bound to cmd buffer, followed by special-case objects below
IncrementBoundObjects(cb_node);
// TODO : We should be able to remove the NULL look-up checks from the code below, as long as
// all the corresponding cases are verified to cause CB_INVALID state, and that state is then
// flagged prior to calling this function
for (auto event : cb_node->writeEventsBeforeWait) {
auto event_state = GetEventState(event);
if (event_state) event_state->write_in_use++;
}
}
// Decrement in-use count for objects bound to command buffer
void ValidationStateTracker::DecrementBoundResources(CMD_BUFFER_STATE const *cb_node) {
BASE_NODE *base_obj = nullptr;
for (auto obj : cb_node->object_bindings) {
base_obj = GetStateStructPtrFromObject(obj);
if (base_obj) {
base_obj->in_use.fetch_sub(1);
}
}
}
void ValidationStateTracker::RetireWorkOnQueue(QUEUE_STATE *pQueue, uint64_t seq) {
std::unordered_map<VkQueue, uint64_t> otherQueueSeqs;
// Roll this queue forward, one submission at a time.
while (pQueue->seq < seq) {
auto &submission = pQueue->submissions.front();
for (auto &wait : submission.waitSemaphores) {
auto pSemaphore = GetSemaphoreState(wait.semaphore);
if (pSemaphore) {
pSemaphore->in_use.fetch_sub(1);
}
auto &lastSeq = otherQueueSeqs[wait.queue];
lastSeq = std::max(lastSeq, wait.seq);
}
for (auto &semaphore : submission.signalSemaphores) {
auto pSemaphore = GetSemaphoreState(semaphore);
if (pSemaphore) {
pSemaphore->in_use.fetch_sub(1);
}
}
for (auto &semaphore : submission.externalSemaphores) {
auto pSemaphore = GetSemaphoreState(semaphore);
if (pSemaphore) {
pSemaphore->in_use.fetch_sub(1);
}
}
for (auto cb : submission.cbs) {
auto cb_node = GetCBState(cb);
if (!cb_node) {
continue;
}
// First perform decrement on general case bound objects
DecrementBoundResources(cb_node);
for (auto event : cb_node->writeEventsBeforeWait) {
auto eventNode = eventMap.find(event);
if (eventNode != eventMap.end()) {
eventNode->second.write_in_use--;
}
}
QueryMap localQueryToStateMap;
for (auto &function : cb_node->queryUpdates) {
function(nullptr, /*do_validate*/ false, &localQueryToStateMap);
}
for (auto queryStatePair : localQueryToStateMap) {
if (queryStatePair.second == QUERYSTATE_ENDED) {
queryToStateMap[queryStatePair.first] = QUERYSTATE_AVAILABLE;
const QUERY_POOL_STATE *qp_state = GetQueryPoolState(queryStatePair.first.pool);
if (qp_state && qp_state->createInfo.queryType == VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR) {
queryPassToStateMap[QueryObjectPass(queryStatePair.first, submission.perf_submit_pass)] =
QUERYSTATE_AVAILABLE;
}
}
}
cb_node->in_use.fetch_sub(1);
}
auto pFence = GetFenceState(submission.fence);
if (pFence && pFence->scope == kSyncScopeInternal) {
pFence->state = FENCE_RETIRED;
}
pQueue->submissions.pop_front();
pQueue->seq++;
}
// Roll other queues forward to the highest seq we saw a wait for
for (auto qs : otherQueueSeqs) {
RetireWorkOnQueue(GetQueueState(qs.first), qs.second);
}
}
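// Worked example (illustrative): if a retired submission on queue A waited on a semaphore that
// queue B signals at B's seq 7, otherQueueSeqs records {B: 7} and the tail of this function
// recursively retires queue B's work up through seq 7.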
// Submit a fence to a queue, delimiting previous fences and previous untracked
// work by it.
static void SubmitFence(QUEUE_STATE *pQueue, FENCE_STATE *pFence, uint64_t submitCount) {
pFence->state = FENCE_INFLIGHT;
pFence->signaler.first = pQueue->queue;
pFence->signaler.second = pQueue->seq + pQueue->submissions.size() + submitCount;
}
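// Worked example (illustrative): with pQueue->seq == 5, two submissions still pending, and
// submitCount == 1, the fence's signaler.second becomes 5 + 2 + 1 == 8, the sequence number of
// the submission that will signal it.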
void ValidationStateTracker::PostCallRecordQueueSubmit(VkQueue queue, uint32_t submitCount, const VkSubmitInfo *pSubmits,
VkFence fence, VkResult result) {
uint64_t early_retire_seq = 0;
auto pQueue = GetQueueState(queue);
auto pFence = GetFenceState(fence);
if (pFence) {
if (pFence->scope == kSyncScopeInternal) {
// Mark fence in use
SubmitFence(pQueue, pFence, std::max(1u, submitCount));
if (!submitCount) {
// If no submissions, but just dropping a fence on the end of the queue,
// record an empty submission with just the fence, so we can determine
// its completion.
pQueue->submissions.emplace_back(std::vector<VkCommandBuffer>(), std::vector<SEMAPHORE_WAIT>(),
std::vector<VkSemaphore>(), std::vector<VkSemaphore>(), fence, 0);
}
} else {
// Retire work up until this fence early, we will not see the wait that corresponds to this signal
early_retire_seq = pQueue->seq + pQueue->submissions.size();
}
}
// Now process each individual submit
for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) {
std::vector<VkCommandBuffer> cbs;
const VkSubmitInfo *submit = &pSubmits[submit_idx];
vector<SEMAPHORE_WAIT> semaphore_waits;
vector<VkSemaphore> semaphore_signals;
vector<VkSemaphore> semaphore_externals;
auto *timeline_semaphore_submit = lvl_find_in_chain<VkTimelineSemaphoreSubmitInfoKHR>(submit->pNext);
for (uint32_t i = 0; i < submit->waitSemaphoreCount; ++i) {
VkSemaphore semaphore = submit->pWaitSemaphores[i];
auto pSemaphore = GetSemaphoreState(semaphore);
if (pSemaphore) {
if (pSemaphore->scope == kSyncScopeInternal) {
if (pSemaphore->signaler.first != VK_NULL_HANDLE) {
semaphore_waits.push_back({semaphore, pSemaphore->signaler.first, pSemaphore->signaler.second});
pSemaphore->in_use.fetch_add(1);
}
pSemaphore->signaler.first = VK_NULL_HANDLE;
pSemaphore->signaled = false;
} else {
semaphore_externals.push_back(semaphore);
pSemaphore->in_use.fetch_add(1);
if (pSemaphore->scope == kSyncScopeExternalTemporary) {
pSemaphore->scope = kSyncScopeInternal;
}
}
}
}
for (uint32_t i = 0; i < submit->signalSemaphoreCount; ++i) {
VkSemaphore semaphore = submit->pSignalSemaphores[i];
auto pSemaphore = GetSemaphoreState(semaphore);
if (pSemaphore) {
if (pSemaphore->scope == kSyncScopeInternal) {
if (pSemaphore->type == VK_SEMAPHORE_TYPE_BINARY_KHR) {
pSemaphore->signaler.first = queue;
pSemaphore->signaler.second = pQueue->seq + pQueue->submissions.size() + 1;
pSemaphore->signaled = true;
} else if (timeline_semaphore_submit && timeline_semaphore_submit->pSignalSemaphoreValues) {
// Timeline semaphore: guard against a missing VkTimelineSemaphoreSubmitInfoKHR chain
// before recording the new payload value
pSemaphore->payload = timeline_semaphore_submit->pSignalSemaphoreValues[i];
}
pSemaphore->in_use.fetch_add(1);
semaphore_signals.push_back(semaphore);
} else {
// Retire work up until this submit early; we will not see the wait that corresponds to this signal
early_retire_seq = std::max(early_retire_seq, pQueue->seq + pQueue->submissions.size() + 1);
}
}
}
for (uint32_t i = 0; i < submit->commandBufferCount; i++) {
auto cb_node = GetCBState(submit->pCommandBuffers[i]);
if (cb_node) {
cbs.push_back(submit->pCommandBuffers[i]);
for (auto secondaryCmdBuffer : cb_node->linkedCommandBuffers) {
cbs.push_back(secondaryCmdBuffer->commandBuffer);
IncrementResources(secondaryCmdBuffer);
}
IncrementResources(cb_node);
QueryMap localQueryToStateMap;
for (auto &function : cb_node->queryUpdates) {
function(nullptr, /*do_validate*/ false, &localQueryToStateMap);
}
for (const auto &queryStatePair : localQueryToStateMap) {
queryToStateMap[queryStatePair.first] = queryStatePair.second;
}
EventToStageMap localEventToStageMap;
for (auto &function : cb_node->eventUpdates) {
function(nullptr, /*do_validate*/ false, &localEventToStageMap);
}
for (const auto &eventStagePair : localEventToStageMap) {
eventMap[eventStagePair.first].stageMask = eventStagePair.second;
}
}
}
const auto perf_submit = lvl_find_in_chain<VkPerformanceQuerySubmitInfoKHR>(submit->pNext);
pQueue->submissions.emplace_back(cbs, semaphore_waits, semaphore_signals, semaphore_externals,
submit_idx == submitCount - 1 ? fence : (VkFence)VK_NULL_HANDLE,
perf_submit ? perf_submit->counterPassIndex : 0);
}
if (early_retire_seq) {
RetireWorkOnQueue(pQueue, early_retire_seq);
}
}
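// Illustrative sketch (hypothetical application code, not part of this layer): the
// timeline-semaphore path above is exercised when a submit chains
// VkTimelineSemaphoreSubmitInfoKHR, e.g.:
//
//     uint64_t signal_value = 2;
//     VkTimelineSemaphoreSubmitInfoKHR timeline_info = {VK_STRUCTURE_TYPE_TIMELINE_SEMAPHORE_SUBMIT_INFO_KHR};
//     timeline_info.signalSemaphoreValueCount = 1;
//     timeline_info.pSignalSemaphoreValues = &signal_value;
//     VkSubmitInfo submit_info = {VK_STRUCTURE_TYPE_SUBMIT_INFO};
//     submit_info.pNext = &timeline_info;
//     submit_info.signalSemaphoreCount = 1;
//     submit_info.pSignalSemaphores = &timeline_semaphore;  // hypothetical handle
//     vkQueueSubmit(queue, 1, &submit_info, VK_NULL_HANDLE);
//
// PostCallRecordQueueSubmit then records signal_value as the semaphore's payload.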
void ValidationStateTracker::PostCallRecordAllocateMemory(VkDevice device, const VkMemoryAllocateInfo *pAllocateInfo,
const VkAllocationCallbacks *pAllocator, VkDeviceMemory *pMemory,
VkResult result) {
if (VK_SUCCESS == result) {
AddMemObjInfo(device, *pMemory, pAllocateInfo);
}
}
void ValidationStateTracker::PreCallRecordFreeMemory(VkDevice device, VkDeviceMemory mem, const VkAllocationCallbacks *pAllocator) {
if (!mem) return;
DEVICE_MEMORY_STATE *mem_info = GetDevMemState(mem);
const VulkanTypedHandle obj_struct(mem, kVulkanObjectTypeDeviceMemory);
// Clear mem binding for any bound objects
for (const auto &obj : mem_info->obj_bindings) {
BINDABLE *bindable_state = nullptr;
switch (obj.type) {
case kVulkanObjectTypeImage:
bindable_state = GetImageState(obj.Cast<VkImage>());
break;
case kVulkanObjectTypeBuffer:
bindable_state = GetBufferState(obj.Cast<VkBuffer>());
break;
case kVulkanObjectTypeAccelerationStructureNV:
bindable_state = GetAccelerationStructureState(obj.Cast<VkAccelerationStructureNV>());
break;
default:
// Should only have acceleration structure, buffer, or image objects bound to memory
assert(0);
}
if (bindable_state) {
bindable_state->binding.mem = MEMORY_UNBOUND;
bindable_state->UpdateBoundMemorySet();
}
}
// Any bound cmd buffers are now invalid
InvalidateCommandBuffers(mem_info->cb_bindings, obj_struct);
RemoveAliasingImages(mem_info->bound_images);
mem_info->destroyed = true;
memObjMap.erase(mem);
}
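// Illustrative scenario (not from the source): an app may legally call vkFreeMemory on
// memory still bound to a buffer or image; the loop above then resets each bound object's
// binding.mem to MEMORY_UNBOUND so the tracker holds no dangling reference, and any
// command buffers that referenced the allocation are invalidated.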
void ValidationStateTracker::PostCallRecordQueueBindSparse(VkQueue queue, uint32_t bindInfoCount, const VkBindSparseInfo *pBindInfo,
VkFence fence, VkResult result) {
if (result != VK_SUCCESS) return;
uint64_t early_retire_seq = 0;
auto pFence = GetFenceState(fence);
auto pQueue = GetQueueState(queue);
if (pFence) {
if (pFence->scope == kSyncScopeInternal) {
SubmitFence(pQueue, pFence, std::max(1u, bindInfoCount));
if (!bindInfoCount) {
// No work to do, just dropping a fence in the queue by itself.
pQueue->submissions.emplace_back(std::vector<VkCommandBuffer>(), std::vector<SEMAPHORE_WAIT>(),
std::vector<VkSemaphore>(), std::vector<VkSemaphore>(), fence, 0);
}
} else {
// Retire work up until this fence early; we will not see the wait that corresponds to this signal
early_retire_seq = pQueue->seq + pQueue->submissions.size();
}
}
for (uint32_t bindIdx = 0; bindIdx < bindInfoCount; ++bindIdx) {
const VkBindSparseInfo &bindInfo = pBindInfo[bindIdx];
// Track objects tied to memory
for (uint32_t j = 0; j < bindInfo.bufferBindCount; j++) {
for (uint32_t k = 0; k < bindInfo.pBufferBinds[j].bindCount; k++) {
auto sparse_binding = bindInfo.pBufferBinds[j].pBinds[k];
SetSparseMemBinding({sparse_binding.memory, sparse_binding.memoryOffset, sparse_binding.size},
VulkanTypedHandle(bindInfo.pBufferBinds[j].buffer, kVulkanObjectTypeBuffer));
}
}
for (uint32_t j = 0; j < bindInfo.imageOpaqueBindCount; j++) {
for (uint32_t k = 0; k < bindInfo.pImageOpaqueBinds[j].bindCount; k++) {
auto sparse_binding = bindInfo.pImageOpaqueBinds[j].pBinds[k];
SetSparseMemBinding({sparse_binding.memory, sparse_binding.memoryOffset, sparse_binding.size},
VulkanTypedHandle(bindInfo.pImageOpaqueBinds[j].image, kVulkanObjectTypeImage));
}
}
for (uint32_t j = 0; j < bindInfo.imageBindCount; j++) {
for (uint32_t k = 0; k < bindInfo.pImageBinds[j].bindCount; k++) {
auto sparse_binding = bindInfo.pImageBinds[j].pBinds[k];
// TODO: This size is broken for non-opaque bindings, need to update to comprehend full sparse binding data
VkDeviceSize size = sparse_binding.extent.depth * sparse_binding.extent.height * sparse_binding.extent.width * 4;
SetSparseMemBinding({sparse_binding.memory, sparse_binding.memoryOffset, size},
VulkanTypedHandle(bindInfo.pImageBinds[j].image, kVulkanObjectTypeImage));
}
}
std::vector<SEMAPHORE_WAIT> semaphore_waits;
std::vector<VkSemaphore> semaphore_signals;
std::vector<VkSemaphore> semaphore_externals;
for (uint32_t i = 0; i < bindInfo.waitSemaphoreCount; ++i) {
VkSemaphore semaphore = bindInfo.pWaitSemaphores[i];
auto pSemaphore = GetSemaphoreState(semaphore);
if (pSemaphore) {
if (pSemaphore->scope == kSyncScopeInternal) {
if (pSemaphore->signaler.first != VK_NULL_HANDLE) {
semaphore_waits.push_back({semaphore, pSemaphore->signaler.first, pSemaphore->signaler.second});
pSemaphore->in_use.fetch_add(1);
}
pSemaphore->signaler.first = VK_NULL_HANDLE;
pSemaphore->signaled = false;
} else {
semaphore_externals.push_back(semaphore);
pSemaphore->in_use.fetch_add(1);
if (pSemaphore->scope == kSyncScopeExternalTemporary) {
pSemaphore->scope = kSyncScopeInternal;
}
}
}
}
for (uint32_t i = 0; i < bindInfo.signalSemaphoreCount; ++i) {
VkSemaphore semaphore = bindInfo.pSignalSemaphores[i];
auto pSemaphore = GetSemaphoreState(semaphore);
if (pSemaphore) {
if (pSemaphore->scope == kSyncScopeInternal) {
pSemaphore->signaler.first = queue;
pSemaphore->signaler.second = pQueue->seq + pQueue->submissions.size() + 1;
pSemaphore->signaled = true;
pSemaphore->in_use.fetch_add(1);
semaphore_signals.push_back(semaphore);
} else {
// Retire work up until this submit early; we will not see the wait that corresponds to this signal
early_retire_seq = std::max(early_retire_seq, pQueue->seq + pQueue->submissions.size() + 1);
}
}
}
pQueue->submissions.emplace_back(std::vector<VkCommandBuffer>(), semaphore_waits, semaphore_signals, semaphore_externals,
bindIdx == bindInfoCount - 1 ? fence : (VkFence)VK_NULL_HANDLE, 0);
}
if (early_retire_seq) {
RetireWorkOnQueue(pQueue, early_retire_seq);
}
}
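// Illustrative sketch (hypothetical application code): a minimal opaque buffer bind that
// would flow through the bufferBindCount loop above:
//
//     VkSparseMemoryBind bind = {};
//     bind.resourceOffset = 0;
//     bind.size = mem_reqs.alignment;        // hypothetical size
//     bind.memory = device_memory;           // hypothetical handle
//     bind.memoryOffset = 0;
//     VkSparseBufferMemoryBindInfo buffer_bind = {sparse_buffer, 1, &bind};
//     VkBindSparseInfo bind_info = {VK_STRUCTURE_TYPE_BIND_SPARSE_INFO};
//     bind_info.bufferBindCount = 1;
//     bind_info.pBufferBinds = &buffer_bind;
//     vkQueueBindSparse(queue, 1, &bind_info, VK_NULL_HANDLE);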
void ValidationStateTracker::PostCallRecordCreateSemaphore(VkDevice device, const VkSemaphoreCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkSemaphore *pSemaphore,
VkResult result) {
if (VK_SUCCESS != result) return;
auto semaphore_state = std::make_shared<SEMAPHORE_STATE>();
semaphore_state->signaler.first = VK_NULL_HANDLE;
semaphore_state->signaler.second = 0;
semaphore_state->signaled = false;
semaphore_state->scope = kSyncScopeInternal;
semaphore_state->type = VK_SEMAPHORE_TYPE_BINARY_KHR;
semaphore_state->payload = 0;
auto semaphore_type_create_info = lvl_find_in_chain<VkSemaphoreTypeCreateInfoKHR>(pCreateInfo->pNext);
if (semaphore_type_create_info) {
semaphore_state->type = semaphore_type_create_info->semaphoreType;
semaphore_state->payload = semaphore_type_create_info->initialValue;
}
semaphoreMap[*pSemaphore] = std::move(semaphore_state);
}
void ValidationStateTracker::RecordImportSemaphoreState(VkSemaphore semaphore, VkExternalSemaphoreHandleTypeFlagBitsKHR handle_type,
VkSemaphoreImportFlagsKHR flags) {
SEMAPHORE_STATE *sema_node = GetSemaphoreState(semaphore);
if (sema_node && sema_node->scope != kSyncScopeExternalPermanent) {
if ((handle_type == VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT_KHR || flags & VK_SEMAPHORE_IMPORT_TEMPORARY_BIT_KHR) &&
sema_node->scope == kSyncScopeInternal) {
sema_node->scope = kSyncScopeExternalTemporary;
} else {
sema_node->scope = kSyncScopeExternalPermanent;
}
}
}
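// Illustrative sketch (hypothetical application code): a temporary import that would move
// the semaphore to kSyncScopeExternalTemporary until the next wait:
//
//     VkImportSemaphoreFdInfoKHR import_info = {VK_STRUCTURE_TYPE_IMPORT_SEMAPHORE_FD_INFO_KHR};
//     import_info.semaphore = semaphore;                                  // hypothetical handle
//     import_info.flags = VK_SEMAPHORE_IMPORT_TEMPORARY_BIT_KHR;
//     import_info.handleType = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT_KHR;
//     import_info.fd = payload_fd;                                        // hypothetical fd
//     vkImportSemaphoreFdKHR(device, &import_info);
//
// A subsequent wait recorded in PostCallRecordQueueSubmit flips the scope back to
// kSyncScopeInternal, per the kSyncScopeExternalTemporary handling there.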
void ValidationStateTracker::PostCallRecordSignalSemaphoreKHR(VkDevice device, const VkSemaphoreSignalInfoKHR *pSignalInfo,
VkResult result) {
if (VK_SUCCESS != result) return;
auto *pSemaphore = GetSemaphoreState(pSignalInfo->semaphore);
// Guard against an unknown semaphore handle
if (pSemaphore) {
pSemaphore->payload = pSignalInfo->value;
}
}
void ValidationStateTracker::RecordMappedMemory(VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size, void **ppData) {
auto mem_info = GetDevMemState(mem);
if (mem_info) {
mem_info->mapped_range.offset = offset;
mem_info->mapped_range.size = size;
mem_info->p_driver_data = *ppData;
}
}
void ValidationStateTracker::RetireFence(VkFence fence) {
auto pFence = GetFenceState(fence);
if (pFence && pFence->scope == kSyncScopeInternal) {
if (pFence->signaler.first != VK_NULL_HANDLE) {
// Fence signaller is a queue -- use this as proof that prior operations on that queue have completed.
RetireWorkOnQueue(GetQueueState(pFence->signaler.first), pFence->signaler.second);
} else {
// Fence signaller is the WSI. We're not tracking what the WSI op actually /was/ in CV yet, but we need to mark
// the fence as retired.
pFence->state = FENCE_RETIRED;
}
}
}
void ValidationStateTracker::PostCallRecordWaitForFences(VkDevice device, uint32_t fenceCount, const VkFence *pFences,
VkBool32 waitAll, uint64_t timeout, VkResult result) {
if (VK_SUCCESS != result) return;
// When we know that all fences are complete we can clean/remove their CBs
if ((VK_TRUE == waitAll) || (1 == fenceCount)) {
for (uint32_t i = 0; i < fenceCount; i++) {
RetireFence(pFences[i]);
}
}
// NOTE: The alternate case, where only some of the fences have completed, is not handled
// here. For the app to determine which fences completed, it must call vkGetFenceStatus(),
// at which point we'll clean/remove their CBs if complete.
}
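// Illustrative sketch (hypothetical application code): the fence lifecycle that drives
// RetireFence() through this entry point:
//
//     VkFenceCreateInfo fence_ci = {VK_STRUCTURE_TYPE_FENCE_CREATE_INFO};
//     vkCreateFence(device, &fence_ci, nullptr, &fence);          // FENCE_UNSIGNALED
//     vkQueueSubmit(queue, 1, &submit_info, fence);               // SubmitFence -> FENCE_INFLIGHT
//     vkWaitForFences(device, 1, &fence, VK_TRUE, UINT64_MAX);    // RetireFence -> FENCE_RETIRED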
void ValidationStateTracker::PostCallRecordGetFenceStatus(VkDevice device, VkFence fence, VkResult result) {
if (VK_SUCCESS != result) return;
RetireFence(fence);
}
void ValidationStateTracker::RecordGetDeviceQueueState(uint32_t queue_family_index, VkQueue queue) {
// Add queue to tracking set only if it is new
auto queue_is_new = queues.emplace(queue);
if (queue_is_new.second) {
QUEUE_STATE *queue_state = &queueMap[queue];
queue_state->queue = queue;
queue_state->queueFamilyIndex = queue_family_index;
queue_state->seq = 0;
}
}
void ValidationStateTracker::PostCallRecordGetDeviceQueue(VkDevice device, uint32_t queueFamilyIndex, uint32_t queueIndex,
VkQueue *pQueue) {
RecordGetDeviceQueueState(queueFamilyIndex, *pQueue);
}
void ValidationStateTracker::PostCallRecordGetDeviceQueue2(VkDevice device, const VkDeviceQueueInfo2 *pQueueInfo, VkQueue *pQueue) {
RecordGetDeviceQueueState(pQueueInfo->queueFamilyIndex, *pQueue);
}
void ValidationStateTracker::PostCallRecordQueueWaitIdle(VkQueue queue, VkResult result) {
if (VK_SUCCESS != result) return;
QUEUE_STATE *queue_state = GetQueueState(queue);
RetireWorkOnQueue(queue_state, queue_state->seq + queue_state->submissions.size());
}
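// Worked example (illustrative numbers): with queue_state->seq == 5 and three pending
// submissions, the call above retires through seq 5 + 3 == 8, draining the queue's
// submission list just as if a fence had signaled after the last submit.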
void ValidationStateTracker::PostCallRecordDeviceWaitIdle(VkDevice device, VkResult result) {
if (VK_SUCCESS != result) return;
for (auto &queue : queueMap) {
RetireWorkOnQueue(&queue.second, queue.second.seq + queue.second.submissions.size());
}
}
void ValidationStateTracker::PreCallRecordDestroyFence(VkDevice device, VkFence fence, const VkAllocationCallbacks *pAllocator) {
if (!fence) return;
auto fence_state = GetFenceState(fence);
fence_state->destroyed = true;
fenceMap.erase(fence);
}
void ValidationStateTracker::PreCallRecordDestroySemaphore(VkDevice device, VkSemaphore semaphore,
const VkAllocationCallbacks *pAllocator) {
if (!semaphore) return;
auto semaphore_state = GetSemaphoreState(semaphore);
semaphore_state->destroyed = true;
semaphoreMap.erase(semaphore);
}
void ValidationStateTracker::PreCallRecordDestroyEvent(VkDevice device, VkEvent event, const VkAllocationCallbacks *pAllocator) {
if (!event) return;
EVENT_STATE *event_state = GetEventState(event);
const VulkanTypedHandle obj_struct(event, kVulkanObjectTypeEvent);
InvalidateCommandBuffers(event_state->cb_bindings, obj_struct);
eventMap.erase(event);
}
void ValidationStateTracker::PreCallRecordDestroyQueryPool(VkDevice device, VkQueryPool queryPool,
const VkAllocationCallbacks *pAllocator) {
if (!queryPool) return;
QUERY_POOL_STATE *qp_state = GetQueryPoolState(queryPool);
const VulkanTypedHandle obj_struct(queryPool, kVulkanObjectTypeQueryPool);
InvalidateCommandBuffers(qp_state->cb_bindings, obj_struct);
qp_state->destroyed = true;
queryPoolMap.erase(queryPool);
}
// Object with given handle is being bound to memory w/ given mem_info struct.
// Track the binding by inserting the object's handle into the mem_info set that matches
// its type (image, buffer, or acceleration structure).
// The memoryOffset, memRequirements, and is_linear parameters are accepted for interface
// compatibility but are not used by the state tracker itself; overlap checking of linear
// vs. non-linear ranges is not performed here.
void ValidationStateTracker::InsertMemoryRange(const VulkanTypedHandle &typed_handle, DEVICE_MEMORY_STATE *mem_info,
VkDeviceSize memoryOffset, VkMemoryRequirements memRequirements, bool is_linear) {
if (typed_handle.type == kVulkanObjectTypeImage) {
mem_info->bound_images.insert(typed_handle.Cast<VkImage>());
} else if (typed_handle.type == kVulkanObjectTypeBuffer) {
mem_info->bound_buffers.insert(typed_handle.handle);
} else if (typed_handle.type == kVulkanObjectTypeAccelerationStructureNV) {
mem_info->bound_acceleration_structures.insert(typed_handle.handle);
} else {
// Unsupported object type
assert(false);
}
}
void ValidationStateTracker::InsertImageMemoryRange(VkImage image, DEVICE_MEMORY_STATE *mem_info, VkDeviceSize mem_offset,
VkMemoryRequirements mem_reqs, bool is_linear) {
InsertMemoryRange(VulkanTypedHandle(image, kVulkanObjectTypeImage), mem_info, mem_offset, mem_reqs, is_linear);
}
void ValidationStateTracker::InsertBufferMemoryRange(VkBuffer buffer, DEVICE_MEMORY_STATE *mem_info, VkDeviceSize mem_offset,
const VkMemoryRequirements &mem_reqs) {
InsertMemoryRange(VulkanTypedHandle(buffer, kVulkanObjectTypeBuffer), mem_info, mem_offset, mem_reqs, true);
}
void ValidationStateTracker::InsertAccelerationStructureMemoryRange(VkAccelerationStructureNV as, DEVICE_MEMORY_STATE *mem_info,
VkDeviceSize mem_offset, const VkMemoryRequirements &mem_reqs) {
InsertMemoryRange(VulkanTypedHandle(as, kVulkanObjectTypeAccelerationStructureNV), mem_info, mem_offset, mem_reqs, true);
}
// Remove the object's handle from the mem_info set that matches its type.
static void RemoveMemoryRange(const VulkanTypedHandle &typed_handle, DEVICE_MEMORY_STATE *mem_info) {
if (typed_handle.type == kVulkanObjectTypeImage) {
mem_info->bound_images.erase(typed_handle.Cast<VkImage>());
} else if (typed_handle.type == kVulkanObjectTypeBuffer) {
mem_info->bound_buffers.erase(typed_handle.handle);
} else if (typed_handle.type == kVulkanObjectTypeAccelerationStructureNV) {
mem_info->bound_acceleration_structures.erase(typed_handle.handle);
} else {
// Unsupported object type
assert(false);
}
}
void ValidationStateTracker::RemoveBufferMemoryRange(VkBuffer buffer, DEVICE_MEMORY_STATE *mem_info) {
RemoveMemoryRange(VulkanTypedHandle(buffer, kVulkanObjectTypeBuffer), mem_info);
}
void ValidationStateTracker::RemoveImageMemoryRange(VkImage image, DEVICE_MEMORY_STATE *mem_info) {
RemoveMemoryRange(VulkanTypedHandle(image, kVulkanObjectTypeImage), mem_info);
}
void ValidationStateTracker::RemoveAccelerationStructureMemoryRange(VkAccelerationStructureNV as, DEVICE_MEMORY_STATE *mem_info) {
RemoveMemoryRange(VulkanTypedHandle(as, kVulkanObjectTypeAccelerationStructureNV), mem_info);
}
void ValidationStateTracker::UpdateBindBufferMemoryState(VkBuffer buffer, VkDeviceMemory mem, VkDeviceSize memoryOffset) {
BUFFER_STATE *buffer_state = GetBufferState(buffer);
if (buffer_state) {
// Track bound memory range information
auto mem_info = GetDevMemState(mem);
if (mem_info) {
InsertBufferMemoryRange(buffer, mem_info, memoryOffset, buffer_state->requirements);
}
// Track objects tied to memory
SetMemBinding(mem, buffer_state, memoryOffset, VulkanTypedHandle(buffer, kVulkanObjectTypeBuffer));
}
}
void ValidationStateTracker::PostCallRecordBindBufferMemory(VkDevice device, VkBuffer buffer, VkDeviceMemory mem,
VkDeviceSize memoryOffset, VkResult result) {
if (VK_SUCCESS != result) return;
UpdateBindBufferMemoryState(buffer, mem, memoryOffset);
}
void ValidationStateTracker::PostCallRecordBindBufferMemory2(VkDevice device, uint32_t bindInfoCount,
const VkBindBufferMemoryInfoKHR *pBindInfos, VkResult result) {
for (uint32_t i = 0; i < bindInfoCount; i++) {
UpdateBindBufferMemoryState(pBindInfos[i].buffer, pBindInfos[i].memory, pBindInfos[i].memoryOffset);
}
}
void ValidationStateTracker::PostCallRecordBindBufferMemory2KHR(VkDevice device, uint32_t bindInfoCount,
const VkBindBufferMemoryInfoKHR *pBindInfos, VkResult result) {
for (uint32_t i = 0; i < bindInfoCount; i++) {
UpdateBindBufferMemoryState(pBindInfos[i].buffer, pBindInfos[i].memory, pBindInfos[i].memoryOffset);
}
}
void ValidationStateTracker::RecordGetBufferMemoryRequirementsState(VkBuffer buffer, VkMemoryRequirements *pMemoryRequirements) {
BUFFER_STATE *buffer_state = GetBufferState(buffer);
if (buffer_state) {
buffer_state->requirements = *pMemoryRequirements;
buffer_state->memory_requirements_checked = true;
}
}
void ValidationStateTracker::PostCallRecordGetBufferMemoryRequirements(VkDevice device, VkBuffer buffer,
VkMemoryRequirements *pMemoryRequirements) {
RecordGetBufferMemoryRequirementsState(buffer, pMemoryRequirements);
}
void ValidationStateTracker::PostCallRecordGetBufferMemoryRequirements2(VkDevice device,
const VkBufferMemoryRequirementsInfo2KHR *pInfo,
VkMemoryRequirements2KHR *pMemoryRequirements) {
RecordGetBufferMemoryRequirementsState(pInfo->buffer, &pMemoryRequirements->memoryRequirements);
}
void ValidationStateTracker::PostCallRecordGetBufferMemoryRequirements2KHR(VkDevice device,
const VkBufferMemoryRequirementsInfo2KHR *pInfo,
VkMemoryRequirements2KHR *pMemoryRequirements) {
RecordGetBufferMemoryRequirementsState(pInfo->buffer, &pMemoryRequirements->memoryRequirements);
}
void ValidationStateTracker::RecordGetImageMemoryRequiementsState(VkImage image, VkMemoryRequirements *pMemoryRequirements) {
IMAGE_STATE *image_state = GetImageState(image);
if (image_state) {
image_state->requirements = *pMemoryRequirements;
image_state->memory_requirements_checked = true;
}
}
void ValidationStateTracker::PostCallRecordGetImageMemoryRequirements(VkDevice device, VkImage image,
VkMemoryRequirements *pMemoryRequirements) {
RecordGetImageMemoryRequiementsState(image, pMemoryRequirements);
}
void ValidationStateTracker::PostCallRecordGetImageMemoryRequirements2(VkDevice device, const VkImageMemoryRequirementsInfo2 *pInfo,
VkMemoryRequirements2 *pMemoryRequirements) {
RecordGetImageMemoryRequiementsState(pInfo->image, &pMemoryRequirements->memoryRequirements);
}
void ValidationStateTracker::PostCallRecordGetImageMemoryRequirements2KHR(VkDevice device,
const VkImageMemoryRequirementsInfo2 *pInfo,
VkMemoryRequirements2 *pMemoryRequirements) {
RecordGetImageMemoryRequiementsState(pInfo->image, &pMemoryRequirements->memoryRequirements);
}
static void RecordGetImageSparseMemoryRequirementsState(IMAGE_STATE *image_state,
VkSparseImageMemoryRequirements *sparse_image_memory_requirements) {
image_state->sparse_requirements.emplace_back(*sparse_image_memory_requirements);
if (sparse_image_memory_requirements->formatProperties.aspectMask & VK_IMAGE_ASPECT_METADATA_BIT) {
image_state->sparse_metadata_required = true;
}
}
void ValidationStateTracker::PostCallRecordGetImageSparseMemoryRequirements(
VkDevice device, VkImage image, uint32_t *pSparseMemoryRequirementCount,
VkSparseImageMemoryRequirements *pSparseMemoryRequirements) {
auto image_state = GetImageState(image);
image_state->get_sparse_reqs_called = true;
if (!pSparseMemoryRequirements) return;
for (uint32_t i = 0; i < *pSparseMemoryRequirementCount; i++) {
RecordGetImageSparseMemoryRequirementsState(image_state, &pSparseMemoryRequirements[i]);
}
}
void ValidationStateTracker::PostCallRecordGetImageSparseMemoryRequirements2(
VkDevice device, const VkImageSparseMemoryRequirementsInfo2KHR *pInfo, uint32_t *pSparseMemoryRequirementCount,
VkSparseImageMemoryRequirements2KHR *pSparseMemoryRequirements) {
auto image_state = GetImageState(pInfo->image);
image_state->get_sparse_reqs_called = true;
if (!pSparseMemoryRequirements) return;
for (uint32_t i = 0; i < *pSparseMemoryRequirementCount; i++) {
assert(!pSparseMemoryRequirements[i].pNext); // TODO: If an extension is ever added here we need to handle it
RecordGetImageSparseMemoryRequirementsState(image_state, &pSparseMemoryRequirements[i].memoryRequirements);
}
}
void ValidationStateTracker::PostCallRecordGetImageSparseMemoryRequirements2KHR(
VkDevice device, const VkImageSparseMemoryRequirementsInfo2KHR *pInfo, uint32_t *pSparseMemoryRequirementCount,
VkSparseImageMemoryRequirements2KHR *pSparseMemoryRequirements) {
auto image_state = GetImageState(pInfo->image);
image_state->get_sparse_reqs_called = true;
if (!pSparseMemoryRequirements) return;
for (uint32_t i = 0; i < *pSparseMemoryRequirementCount; i++) {
assert(!pSparseMemoryRequirements[i].pNext); // TODO: If an extension is ever added here we need to handle it
RecordGetImageSparseMemoryRequirementsState(image_state, &pSparseMemoryRequirements[i].memoryRequirements);
}
}
void ValidationStateTracker::PreCallRecordDestroyShaderModule(VkDevice device, VkShaderModule shaderModule,
const VkAllocationCallbacks *pAllocator) {
if (!shaderModule) return;
auto shader_module_state = GetShaderModuleState(shaderModule);
shader_module_state->destroyed = true;
shaderModuleMap.erase(shaderModule);
}
void ValidationStateTracker::PreCallRecordDestroyPipeline(VkDevice device, VkPipeline pipeline,
const VkAllocationCallbacks *pAllocator) {
if (!pipeline) return;
PIPELINE_STATE *pipeline_state = GetPipelineState(pipeline);
const VulkanTypedHandle obj_struct(pipeline, kVulkanObjectTypePipeline);
// Any bound cmd buffers are now invalid
InvalidateCommandBuffers(pipeline_state->cb_bindings, obj_struct);
pipeline_state->destroyed = true;
pipelineMap.erase(pipeline);
}
void ValidationStateTracker::PreCallRecordDestroyPipelineLayout(VkDevice device, VkPipelineLayout pipelineLayout,
const VkAllocationCallbacks *pAllocator) {
if (!pipelineLayout) return;
auto pipeline_layout_state = GetPipelineLayout(pipelineLayout);
pipeline_layout_state->destroyed = true;
pipelineLayoutMap.erase(pipelineLayout);
}
void ValidationStateTracker::PreCallRecordDestroySampler(VkDevice device, VkSampler sampler,
const VkAllocationCallbacks *pAllocator) {
if (!sampler) return;
SAMPLER_STATE *sampler_state = GetSamplerState(sampler);
const VulkanTypedHandle obj_struct(sampler, kVulkanObjectTypeSampler);
// Any bound cmd buffers are now invalid
if (sampler_state) {
InvalidateCommandBuffers(sampler_state->cb_bindings, obj_struct);
sampler_state->destroyed = true;
}
samplerMap.erase(sampler);
}
void ValidationStateTracker::PreCallRecordDestroyDescriptorSetLayout(VkDevice device, VkDescriptorSetLayout descriptorSetLayout,
const VkAllocationCallbacks *pAllocator) {
if (!descriptorSetLayout) return;
auto layout_it = descriptorSetLayoutMap.find(descriptorSetLayout);
if (layout_it != descriptorSetLayoutMap.end()) {
layout_it->second.get()->destroyed = true;
descriptorSetLayoutMap.erase(layout_it);
}
}
void ValidationStateTracker::PreCallRecordDestroyDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool,
const VkAllocationCallbacks *pAllocator) {
if (!descriptorPool) return;
DESCRIPTOR_POOL_STATE *desc_pool_state = GetDescriptorPoolState(descriptorPool);
const VulkanTypedHandle obj_struct(descriptorPool, kVulkanObjectTypeDescriptorPool);
if (desc_pool_state) {
// Any bound cmd buffers are now invalid
InvalidateCommandBuffers(desc_pool_state->cb_bindings, obj_struct);
// Free sets that were in this pool
for (auto ds : desc_pool_state->sets) {
FreeDescriptorSet(ds);
}
desc_pool_state->destroyed = true;
descriptorPoolMap.erase(descriptorPool);
}
}
// Free all command buffers in given list, removing all references/links to them using ResetCommandBufferState
void ValidationStateTracker::FreeCommandBufferStates(COMMAND_POOL_STATE *pool_state, const uint32_t command_buffer_count,
const VkCommandBuffer *command_buffers) {
for (uint32_t i = 0; i < command_buffer_count; i++) {
auto cb_state = GetCBState(command_buffers[i]);
// Remove references to command buffer's state and delete
if (cb_state) {
// Reset the command buffer state prior to delete, removing various references to it.
// TODO: fix this, it's insane.
ResetCommandBufferState(cb_state->commandBuffer);
// Remove the cb_state's references from COMMAND_POOL_STATEs
pool_state->commandBuffers.erase(command_buffers[i]);
// Remove the cb debug labels
EraseCmdDebugUtilsLabel(report_data, cb_state->commandBuffer);
// Remove CBState from CB map
cb_state->destroyed = true;
commandBufferMap.erase(cb_state->commandBuffer);
}
}
}
void ValidationStateTracker::PreCallRecordFreeCommandBuffers(VkDevice device, VkCommandPool commandPool,
uint32_t commandBufferCount, const VkCommandBuffer *pCommandBuffers) {
auto pPool = GetCommandPoolState(commandPool);
FreeCommandBufferStates(pPool, commandBufferCount, pCommandBuffers);
}
void ValidationStateTracker::PostCallRecordCreateCommandPool(VkDevice device, const VkCommandPoolCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkCommandPool *pCommandPool,
VkResult result) {
if (VK_SUCCESS != result) return;
auto cmd_pool_state = std::make_shared<COMMAND_POOL_STATE>();
cmd_pool_state->createFlags = pCreateInfo->flags;
cmd_pool_state->queueFamilyIndex = pCreateInfo->queueFamilyIndex;
commandPoolMap[*pCommandPool] = std::move(cmd_pool_state);
}
void ValidationStateTracker::PostCallRecordCreateQueryPool(VkDevice device, const VkQueryPoolCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkQueryPool *pQueryPool,
VkResult result) {
if (VK_SUCCESS != result) return;
auto query_pool_state = std::make_shared<QUERY_POOL_STATE>();
query_pool_state->createInfo = *pCreateInfo;
query_pool_state->pool = *pQueryPool;
if (pCreateInfo->queryType == VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR) {
const auto *perf = lvl_find_in_chain<VkQueryPoolPerformanceCreateInfoKHR>(pCreateInfo->pNext);
// Guard against a missing VkQueryPoolPerformanceCreateInfoKHR chain before dereferencing
if (perf) {
const QUEUE_FAMILY_PERF_COUNTERS &counters = *physical_device_state->perf_counters[perf->queueFamilyIndex];
for (uint32_t i = 0; i < perf->counterIndexCount; i++) {
const auto &counter = counters.counters[perf->pCounterIndices[i]];
switch (counter.scope) {
case VK_QUERY_SCOPE_COMMAND_BUFFER_KHR:
query_pool_state->has_perf_scope_command_buffer = true;
break;
case VK_QUERY_SCOPE_RENDER_PASS_KHR:
query_pool_state->has_perf_scope_render_pass = true;
break;
default:
break;
}
}
DispatchGetPhysicalDeviceQueueFamilyPerformanceQueryPassesKHR(physical_device_state->phys_device, perf,
&query_pool_state->n_performance_passes);
}
}
queryPoolMap[*pQueryPool] = std::move(query_pool_state);
QueryObject query_obj{*pQueryPool, 0u};
for (uint32_t i = 0; i < pCreateInfo->queryCount; ++i) {
query_obj.query = i;
queryToStateMap[query_obj] = QUERYSTATE_UNKNOWN;
}
}
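// Illustrative sketch (hypothetical application code): a performance query pool whose
// counters are walked by the scope loop above:
//
//     uint32_t counter_indices[] = {0, 1};  // hypothetical counter indices
//     VkQueryPoolPerformanceCreateInfoKHR perf_ci = {VK_STRUCTURE_TYPE_QUERY_POOL_PERFORMANCE_CREATE_INFO_KHR};
//     perf_ci.queueFamilyIndex = 0;
//     perf_ci.counterIndexCount = 2;
//     perf_ci.pCounterIndices = counter_indices;
//     VkQueryPoolCreateInfo pool_ci = {VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO};
//     pool_ci.pNext = &perf_ci;
//     pool_ci.queryType = VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR;
//     pool_ci.queryCount = 1;
//     vkCreateQueryPool(device, &pool_ci, nullptr, &query_pool);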
void ValidationStateTracker::PreCallRecordDestroyCommandPool(VkDevice device, VkCommandPool commandPool,
const VkAllocationCallbacks *pAllocator) {
if (!commandPool) return;
COMMAND_POOL_STATE *cp_state = GetCommandPoolState(commandPool);
// Remove cmdpool from cmdpoolmap, after freeing layer data for the command buffers
// "When a pool is destroyed, all command buffers allocated from the pool are freed."
if (cp_state) {
// Create a vector, as FreeCommandBufferStates deletes from cp_state->commandBuffers during iteration.
std::vector<VkCommandBuffer> cb_vec{cp_state->commandBuffers.begin(), cp_state->commandBuffers.end()};
FreeCommandBufferStates(cp_state, static_cast<uint32_t>(cb_vec.size()), cb_vec.data());
cp_state->destroyed = true;
commandPoolMap.erase(commandPool);
}
}
void ValidationStateTracker::PostCallRecordResetCommandPool(VkDevice device, VkCommandPool commandPool,
VkCommandPoolResetFlags flags, VkResult result) {
if (VK_SUCCESS != result) return;
// Reset all of the CBs allocated from this pool
auto command_pool_state = GetCommandPoolState(commandPool);
for (auto cmdBuffer : command_pool_state->commandBuffers) {
ResetCommandBufferState(cmdBuffer);
}
}
void ValidationStateTracker::PostCallRecordResetFences(VkDevice device, uint32_t fenceCount, const VkFence *pFences,
VkResult result) {
for (uint32_t i = 0; i < fenceCount; ++i) {
auto pFence = GetFenceState(pFences[i]);
if (pFence) {
if (pFence->scope == kSyncScopeInternal) {
pFence->state = FENCE_UNSIGNALED;
} else if (pFence->scope == kSyncScopeExternalTemporary) {
pFence->scope = kSyncScopeInternal;
}
}
}
}
// For given cb_nodes, invalidate them and track object causing invalidation.
// InvalidateCommandBuffers and InvalidateLinkedCommandBuffers are essentially
// the same, except one takes a map and one takes a set, and InvalidateCommandBuffers
// can also unlink objects from command buffers.
void ValidationStateTracker::InvalidateCommandBuffers(small_unordered_map<CMD_BUFFER_STATE *, int, 8> &cb_nodes,
const VulkanTypedHandle &obj, bool unlink) {
for (const auto &cb_node_pair : cb_nodes) {
auto &cb_node = cb_node_pair.first;
if (cb_node->state == CB_RECORDING) {
cb_node->state = CB_INVALID_INCOMPLETE;
} else if (cb_node->state == CB_RECORDED) {
cb_node->state = CB_INVALID_COMPLETE;
}
cb_node->broken_bindings.push_back(obj);
// if secondary, then propagate the invalidation to the primaries that will call us.
if (cb_node->createInfo.level == VK_COMMAND_BUFFER_LEVEL_SECONDARY) {
InvalidateLinkedCommandBuffers(cb_node->linkedCommandBuffers, obj);
}
if (unlink) {
int index = cb_node_pair.second;
assert(cb_node->object_bindings[index] == obj);
cb_node->object_bindings[index] = VulkanTypedHandle();
}
}
if (unlink) {
cb_nodes.clear();
}
}
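// For example (illustrative scenario): destroying a sampler that a recorded command buffer
// references lands here via PreCallRecordDestroySampler -> InvalidateCommandBuffers, moving
// that CB from CB_RECORDED to CB_INVALID_COMPLETE so that a later submit of it can be
// flagged against the broken binding.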
void ValidationStateTracker::InvalidateLinkedCommandBuffers(std::unordered_set<CMD_BUFFER_STATE *> &cb_nodes,
const VulkanTypedHandle &obj) {
for (auto cb_node : cb_nodes) {
if (cb_node->state == CB_RECORDING) {
cb_node->state = CB_INVALID_INCOMPLETE;
} else if (cb_node->state == CB_RECORDED) {
cb_node->state = CB_INVALID_COMPLETE;
}
cb_node->broken_bindings.push_back(obj);
// if secondary, then propagate the invalidation to the primaries that will call us.
if (cb_node->createInfo.level == VK_COMMAND_BUFFER_LEVEL_SECONDARY) {
InvalidateLinkedCommandBuffers(cb_node->linkedCommandBuffers, obj);
}
}
}
void ValidationStateTracker::PreCallRecordDestroyFramebuffer(VkDevice device, VkFramebuffer framebuffer,
const VkAllocationCallbacks *pAllocator) {
if (!framebuffer) return;
FRAMEBUFFER_STATE *framebuffer_state = GetFramebufferState(framebuffer);
const VulkanTypedHandle obj_struct(framebuffer, kVulkanObjectTypeFramebuffer);
InvalidateCommandBuffers(framebuffer_state->cb_bindings, obj_struct);
framebuffer_state->destroyed = true;
frameBufferMap.erase(framebuffer);
}
void ValidationStateTracker::PreCallRecordDestroyRenderPass(VkDevice device, VkRenderPass renderPass,
const VkAllocationCallbacks *pAllocator) {
if (!renderPass) return;
RENDER_PASS_STATE *rp_state = GetRenderPassState(renderPass);
const VulkanTypedHandle obj_struct(renderPass, kVulkanObjectTypeRenderPass);
InvalidateCommandBuffers(rp_state->cb_bindings, obj_struct);
rp_state->destroyed = true;
renderPassMap.erase(renderPass);
}
void ValidationStateTracker::PostCallRecordCreateFence(VkDevice device, const VkFenceCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkFence *pFence, VkResult result) {
if (VK_SUCCESS != result) return;
auto fence_state = std::make_shared<FENCE_STATE>();
fence_state->fence = *pFence;
fence_state->createInfo = *pCreateInfo;
fence_state->state = (pCreateInfo->flags & VK_FENCE_CREATE_SIGNALED_BIT) ? FENCE_RETIRED : FENCE_UNSIGNALED;
fenceMap[*pFence] = std::move(fence_state);
}
bool ValidationStateTracker::PreCallValidateCreateGraphicsPipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t count,
const VkGraphicsPipelineCreateInfo *pCreateInfos,
const VkAllocationCallbacks *pAllocator, VkPipeline *pPipelines,
void *cgpl_state_data) const {
// Set up the state that CoreChecks, gpu_validation and later StateTracker Record will use.
create_graphics_pipeline_api_state *cgpl_state = reinterpret_cast<create_graphics_pipeline_api_state *>(cgpl_state_data);
cgpl_state->pCreateInfos = pCreateInfos; // GPU validation can alter this, so we have to set a default value for the Chassis
cgpl_state->pipe_state.reserve(count);
for (uint32_t i = 0; i < count; i++) {
cgpl_state->pipe_state.push_back(std::make_shared<PIPELINE_STATE>());
(cgpl_state->pipe_state)[i]->initGraphicsPipeline(this, &pCreateInfos[i], GetRenderPassShared(pCreateInfos[i].renderPass));
(cgpl_state->pipe_state)[i]->pipeline_layout = GetPipelineLayoutShared(pCreateInfos[i].layout);
}
return false;
}
void ValidationStateTracker::PostCallRecordCreateGraphicsPipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t count,
const VkGraphicsPipelineCreateInfo *pCreateInfos,
const VkAllocationCallbacks *pAllocator, VkPipeline *pPipelines,
VkResult result, void *cgpl_state_data) {
create_graphics_pipeline_api_state *cgpl_state = reinterpret_cast<create_graphics_pipeline_api_state *>(cgpl_state_data);
// This API may create pipelines regardless of the return value
for (uint32_t i = 0; i < count; i++) {
if (pPipelines[i] != VK_NULL_HANDLE) {
(cgpl_state->pipe_state)[i]->pipeline = pPipelines[i];
pipelineMap[pPipelines[i]] = std::move((cgpl_state->pipe_state)[i]);
}
}
cgpl_state->pipe_state.clear();
}
bool ValidationStateTracker::PreCallValidateCreateComputePipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t count,
const VkComputePipelineCreateInfo *pCreateInfos,
const VkAllocationCallbacks *pAllocator, VkPipeline *pPipelines,
void *ccpl_state_data) const {
auto *ccpl_state = reinterpret_cast<create_compute_pipeline_api_state *>(ccpl_state_data);
ccpl_state->pCreateInfos = pCreateInfos; // GPU validation can alter this, so we have to set a default value for the Chassis
ccpl_state->pipe_state.reserve(count);
for (uint32_t i = 0; i < count; i++) {
// Create and initialize internal tracking data structure
ccpl_state->pipe_state.push_back(std::make_shared<PIPELINE_STATE>());
ccpl_state->pipe_state.back()->initComputePipeline(this, &pCreateInfos[i]);
ccpl_state->pipe_state.back()->pipeline_layout = GetPipelineLayoutShared(pCreateInfos[i].layout);
}
return false;
}
void ValidationStateTracker::PostCallRecordCreateComputePipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t count,
const VkComputePipelineCreateInfo *pCreateInfos,
const VkAllocationCallbacks *pAllocator, VkPipeline *pPipelines,
VkResult result, void *ccpl_state_data) {
create_compute_pipeline_api_state *ccpl_state = reinterpret_cast<create_compute_pipeline_api_state *>(ccpl_state_data);
// This API may create pipelines regardless of the return value
for (uint32_t i = 0; i < count; i++) {
if (pPipelines[i] != VK_NULL_HANDLE) {
(ccpl_state->pipe_state)[i]->pipeline = pPipelines[i];
pipelineMap[pPipelines[i]] = std::move((ccpl_state->pipe_state)[i]);
}
}
ccpl_state->pipe_state.clear();
}
bool ValidationStateTracker::PreCallValidateCreateRayTracingPipelinesNV(VkDevice device, VkPipelineCache pipelineCache,
uint32_t count,
const VkRayTracingPipelineCreateInfoNV *pCreateInfos,
const VkAllocationCallbacks *pAllocator,
VkPipeline *pPipelines, void *crtpl_state_data) const {
auto *crtpl_state = reinterpret_cast<create_ray_tracing_pipeline_api_state *>(crtpl_state_data);
crtpl_state->pipe_state.reserve(count);
for (uint32_t i = 0; i < count; i++) {
// Create and initialize internal tracking data structure
crtpl_state->pipe_state.push_back(std::make_shared<PIPELINE_STATE>());
crtpl_state->pipe_state.back()->initRayTracingPipelineNV(this, &pCreateInfos[i]);
crtpl_state->pipe_state.back()->pipeline_layout = GetPipelineLayoutShared(pCreateInfos[i].layout);
}
return false;
}
void ValidationStateTracker::PostCallRecordCreateRayTracingPipelinesNV(
VkDevice device, VkPipelineCache pipelineCache, uint32_t count, const VkRayTracingPipelineCreateInfoNV *pCreateInfos,
const VkAllocationCallbacks *pAllocator, VkPipeline *pPipelines, VkResult result, void *crtpl_state_data) {
auto *crtpl_state = reinterpret_cast<create_ray_tracing_pipeline_api_state *>(crtpl_state_data);
// This API may create pipelines regardless of the return value
for (uint32_t i = 0; i < count; i++) {
if (pPipelines[i] != VK_NULL_HANDLE) {
(crtpl_state->pipe_state)[i]->pipeline = pPipelines[i];
pipelineMap[pPipelines[i]] = std::move((crtpl_state->pipe_state)[i]);
}
}
crtpl_state->pipe_state.clear();
}
void ValidationStateTracker::PostCallRecordCreateSampler(VkDevice device, const VkSamplerCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkSampler *pSampler,
VkResult result) {
if (VK_SUCCESS != result) return;
samplerMap[*pSampler] = std::make_shared<SAMPLER_STATE>(pSampler, pCreateInfo);
}
void ValidationStateTracker::PostCallRecordCreateDescriptorSetLayout(VkDevice device,
const VkDescriptorSetLayoutCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator,
VkDescriptorSetLayout *pSetLayout, VkResult result) {
if (VK_SUCCESS != result) return;
descriptorSetLayoutMap[*pSetLayout] = std::make_shared<cvdescriptorset::DescriptorSetLayout>(pCreateInfo, *pSetLayout);
}
// Provides a repeatable sort order; not very useful for a "memory in range" search
struct PushConstantRangeCompare {
bool operator()(const VkPushConstantRange *lhs, const VkPushConstantRange *rhs) const {
if (lhs->offset == rhs->offset) {
if (lhs->size == rhs->size) {
// The comparison is arbitrary, but avoids false aliasing by comparing all fields.
return lhs->stageFlags < rhs->stageFlags;
}
// If the offsets are the same then sorting by the end of range is useful for validation
return lhs->size < rhs->size;
}
return lhs->offset < rhs->offset;
}
};
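// A minimal usage sketch (assumed, not from this file): the comparator is intended for
// ordered containers keyed by push-constant-range pointers, e.g.:
//
//     std::set<const VkPushConstantRange *, PushConstantRangeCompare> sorted_ranges;
//     sorted_ranges.insert(&pCreateInfo->pPushConstantRanges[0]);
//
// Distinct but equal-valued ranges compare equivalent, which deduplicates aliases.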
static