/* Copyright (c) 2015-2019 The Khronos Group Inc.
* Copyright (c) 2015-2019 Valve Corporation
* Copyright (c) 2015-2019 LunarG, Inc.
* Copyright (C) 2015-2019 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Author: Mark Lobodzinski <mark@lunarg.com>
* Author: Dave Houlton <daveh@lunarg.com>
* Author: Shannon McPherson <shannon@lunarg.com>
*/
// Allow use of STL min and max functions in Windows
#define NOMINMAX
#include <cmath>
#include <set>
#include <sstream>
#include <string>
#include "vk_enum_string_helper.h"
#include "vk_format_utils.h"
#include "vk_layer_data.h"
#include "vk_layer_utils.h"
#include "vk_layer_logging.h"
#include "vk_typemap_helper.h"
#include "chassis.h"
#include "core_validation.h"
#include "shader_validation.h"
#include "descriptor_sets.h"
#include "buffer_validation.h"
// Convert a VkImageSubresourceLayers into a VkImageSubresourceRange struct
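// Illustrative example (comment only, hypothetical values): a VkImageSubresourceLayers with
// aspectMask = VK_IMAGE_ASPECT_COLOR_BIT, mipLevel = 3, baseArrayLayer = 0, layerCount = 2 maps to a range
// covering only mip level 3 (baseMipLevel = 3, levelCount = 1) across layers 0 and 1.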
static VkImageSubresourceRange RangeFromLayers(const VkImageSubresourceLayers &subresource_layers) {
VkImageSubresourceRange subresource_range;
subresource_range.aspectMask = subresource_layers.aspectMask;
subresource_range.baseArrayLayer = subresource_layers.baseArrayLayer;
subresource_range.layerCount = subresource_layers.layerCount;
subresource_range.baseMipLevel = subresource_layers.mipLevel;
subresource_range.levelCount = 1;
return subresource_range;
}
IMAGE_STATE::IMAGE_STATE(VkImage img, const VkImageCreateInfo *pCreateInfo)
: image(img),
createInfo(*pCreateInfo),
valid(false),
acquired(false),
shared_presentable(false),
layout_locked(false),
get_sparse_reqs_called(false),
sparse_metadata_required(false),
sparse_metadata_bound(false),
imported_ahb(false),
has_ahb_format(false),
ahb_format(0),
full_range{},
create_from_swapchain(VK_NULL_HANDLE),
bind_swapchain(VK_NULL_HANDLE),
bind_swapchain_imageIndex(0),
sparse_requirements{} {
if ((createInfo.sharingMode == VK_SHARING_MODE_CONCURRENT) && (createInfo.queueFamilyIndexCount > 0)) {
uint32_t *pQueueFamilyIndices = new uint32_t[createInfo.queueFamilyIndexCount];
for (uint32_t i = 0; i < createInfo.queueFamilyIndexCount; i++) {
pQueueFamilyIndices[i] = pCreateInfo->pQueueFamilyIndices[i];
}
createInfo.pQueueFamilyIndices = pQueueFamilyIndices;
}
if (createInfo.flags & VK_IMAGE_CREATE_SPARSE_BINDING_BIT) {
sparse = true;
}
const auto format = createInfo.format;
VkImageSubresourceRange init_range{0, 0, VK_REMAINING_MIP_LEVELS, 0, VK_REMAINING_ARRAY_LAYERS};
if (FormatIsColor(format) || FormatIsMultiplane(format)) {
init_range.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; // Normalization will expand this for multiplane
} else {
init_range.aspectMask =
(FormatHasDepth(format) ? VK_IMAGE_ASPECT_DEPTH_BIT : 0) | (FormatHasStencil(format) ? VK_IMAGE_ASPECT_STENCIL_BIT : 0);
}
full_range = NormalizeSubresourceRange(*this, init_range);
auto *externalMemoryInfo = lvl_find_in_chain<VkExternalMemoryImageCreateInfo>(pCreateInfo->pNext);
if (externalMemoryInfo) {
external_memory_handle = externalMemoryInfo->handleTypes;
}
#ifdef VK_USE_PLATFORM_ANDROID_KHR
auto external_format = lvl_find_in_chain<VkExternalFormatANDROID>(createInfo.pNext);
if (external_format) {
external_format_android = external_format->externalFormat;
} else {
// If externalFormat is zero, the effect is as if the VkExternalFormatANDROID structure was not present.
external_format_android = 0;
}
#endif // VK_USE_PLATFORM_ANDROID_KHR
}
bool IMAGE_STATE::IsCreateInfoEqual(const VkImageCreateInfo &other_createInfo) const {
bool is_equal = (createInfo.sType == other_createInfo.sType) && (createInfo.flags == other_createInfo.flags);
is_equal = is_equal && IsImageTypeEqual(other_createInfo) && IsFormatEqual(other_createInfo);
is_equal = is_equal && IsMipLevelsEqual(other_createInfo) && IsArrayLayersEqual(other_createInfo);
is_equal = is_equal && IsUsageEqual(other_createInfo) && IsInitialLayoutEqual(other_createInfo);
is_equal = is_equal && IsExtentEqual(other_createInfo) && IsTilingEqual(other_createInfo);
is_equal = is_equal && IsSamplesEqual(other_createInfo) && IsSharingModeEqual(other_createInfo);
return is_equal && IsQueueFamilyIndicesEqual(other_createInfo);
}
// Check image compatibility rules for VK_NV_dedicated_allocation_image_aliasing
bool IMAGE_STATE::IsCreateInfoDedicatedAllocationImageAliasingCompatible(const VkImageCreateInfo &other_createInfo) const {
bool is_compatible = (createInfo.sType == other_createInfo.sType) && (createInfo.flags == other_createInfo.flags);
is_compatible = is_compatible && IsImageTypeEqual(other_createInfo) && IsFormatEqual(other_createInfo);
is_compatible = is_compatible && IsMipLevelsEqual(other_createInfo);
is_compatible = is_compatible && IsUsageEqual(other_createInfo) && IsInitialLayoutEqual(other_createInfo);
is_compatible = is_compatible && IsSamplesEqual(other_createInfo) && IsSharingModeEqual(other_createInfo);
is_compatible = is_compatible && IsQueueFamilyIndicesEqual(other_createInfo) && IsTilingEqual(other_createInfo);
is_compatible = is_compatible && createInfo.extent.width <= other_createInfo.extent.width &&
createInfo.extent.height <= other_createInfo.extent.height &&
createInfo.extent.depth <= other_createInfo.extent.depth &&
createInfo.arrayLayers <= other_createInfo.arrayLayers;
return is_compatible;
}
bool IMAGE_STATE::IsCompatibleAliasing(IMAGE_STATE *other_image_state) {
if (!(createInfo.flags & other_image_state->createInfo.flags & VK_IMAGE_CREATE_ALIAS_BIT)) return false;
if ((create_from_swapchain == VK_NULL_HANDLE) && (binding.mem == other_image_state->binding.mem) &&
(binding.mem != VK_NULL_HANDLE) && (binding.offset == other_image_state->binding.offset) &&
IsCreateInfoEqual(other_image_state->createInfo)) {
return true;
}
if ((bind_swapchain == other_image_state->bind_swapchain) && (bind_swapchain != VK_NULL_HANDLE)) {
return true;
}
return false;
}
IMAGE_VIEW_STATE::IMAGE_VIEW_STATE(const std::shared_ptr<IMAGE_STATE> &im, VkImageView iv, const VkImageViewCreateInfo *ci)
: image_view(iv),
create_info(*ci),
normalized_subresource_range(ci->subresourceRange),
samplerConversion(VK_NULL_HANDLE),
image_state(im) {
auto *conversionInfo = lvl_find_in_chain<VkSamplerYcbcrConversionInfo>(create_info.pNext);
if (conversionInfo) samplerConversion = conversionInfo->conversion;
if (image_state) {
// A light normalization of the createInfo range
auto &sub_res_range = create_info.subresourceRange;
sub_res_range.levelCount = ResolveRemainingLevels(&sub_res_range, image_state->createInfo.mipLevels);
sub_res_range.layerCount = ResolveRemainingLayers(&sub_res_range, image_state->createInfo.arrayLayers);
// Cache a full normalization (for "full image/whole image" comparisons)
normalized_subresource_range = NormalizeSubresourceRange(*image_state, ci->subresourceRange);
samples = image_state->createInfo.samples;
descriptor_format_bits = DescriptorRequirementsBitsFromFormat(create_info.format);
}
}
uint32_t FullMipChainLevels(uint32_t height, uint32_t width, uint32_t depth) {
// uint cast applies floor()
return 1u + (uint32_t)log2(std::max({height, width, depth}));
}
uint32_t FullMipChainLevels(VkExtent3D extent) { return FullMipChainLevels(extent.height, extent.width, extent.depth); }
uint32_t FullMipChainLevels(VkExtent2D extent) { return FullMipChainLevels(extent.height, extent.width); }
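// Worked example for FullMipChainLevels (comment only): for a 1024 x 768 2D image, std::max picks 1024,
// log2(1024) == 10, so the full mip chain has 1 + 10 = 11 levels (1024, 512, ..., 1).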
VkImageSubresourceRange NormalizeSubresourceRange(const IMAGE_STATE &image_state, const VkImageSubresourceRange &range) {
const VkImageCreateInfo &image_create_info = image_state.createInfo;
VkImageSubresourceRange norm = range;
norm.levelCount = ResolveRemainingLevels(&range, image_create_info.mipLevels);
// Special case for 3D images with VK_IMAGE_CREATE_2D_ARRAY_COMPATIBLE_BIT_KHR flag bit, where <extent.depth> and
// <arrayLayers> can potentially alias.
uint32_t layer_limit = (0 != (image_create_info.flags & VK_IMAGE_CREATE_2D_ARRAY_COMPATIBLE_BIT_KHR))
? image_create_info.extent.depth
: image_create_info.arrayLayers;
norm.layerCount = ResolveRemainingLayers(&range, layer_limit);
// For multiplanar formats, IMAGE_ASPECT_COLOR is equivalent to adding the aspect of the individual planes
VkImageAspectFlags &aspect_mask = norm.aspectMask;
if (FormatIsMultiplane(image_create_info.format)) {
if (aspect_mask & VK_IMAGE_ASPECT_COLOR_BIT) {
aspect_mask &= ~VK_IMAGE_ASPECT_COLOR_BIT;
aspect_mask |= (VK_IMAGE_ASPECT_PLANE_0_BIT | VK_IMAGE_ASPECT_PLANE_1_BIT);
if (FormatPlaneCount(image_create_info.format) > 2) {
aspect_mask |= VK_IMAGE_ASPECT_PLANE_2_BIT;
}
}
}
return norm;
}
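// Illustrative example for NormalizeSubresourceRange (comment only, hypothetical values): for an image created with
// mipLevels = 8, arrayLayers = 4, and the 2-plane format VK_FORMAT_G8_B8R8_2PLANE_420_UNORM, the range
// {VK_IMAGE_ASPECT_COLOR_BIT, /*baseMipLevel*/ 2, VK_REMAINING_MIP_LEVELS, /*baseArrayLayer*/ 0, VK_REMAINING_ARRAY_LAYERS}
// normalizes to aspectMask = PLANE_0 | PLANE_1, baseMipLevel = 2, levelCount = 6, baseArrayLayer = 0, layerCount = 4.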
template <class OBJECT, class LAYOUT>
void CoreChecks::SetLayout(OBJECT *pObject, VkImage image, VkImageSubresource range, const LAYOUT &layout) {
ImageSubresourcePair imgpair = {image, true, range};
SetLayout(pObject, imgpair, layout, VK_IMAGE_ASPECT_COLOR_BIT);
SetLayout(pObject, imgpair, layout, VK_IMAGE_ASPECT_DEPTH_BIT);
SetLayout(pObject, imgpair, layout, VK_IMAGE_ASPECT_STENCIL_BIT);
SetLayout(pObject, imgpair, layout, VK_IMAGE_ASPECT_METADATA_BIT);
if (device_extensions.vk_khr_sampler_ycbcr_conversion) {
SetLayout(pObject, imgpair, layout, VK_IMAGE_ASPECT_PLANE_0_BIT_KHR);
SetLayout(pObject, imgpair, layout, VK_IMAGE_ASPECT_PLANE_1_BIT_KHR);
SetLayout(pObject, imgpair, layout, VK_IMAGE_ASPECT_PLANE_2_BIT_KHR);
}
}
template <class OBJECT, class LAYOUT>
void CoreChecks::SetLayout(OBJECT *pObject, ImageSubresourcePair imgpair, const LAYOUT &layout, VkImageAspectFlags aspectMask) {
if (imgpair.subresource.aspectMask & aspectMask) {
imgpair.subresource.aspectMask = aspectMask;
SetLayout(pObject, imgpair, layout);
}
}
// Set the layout in supplied map
void CoreChecks::SetLayout(ImageSubresPairLayoutMap &imageLayoutMap, ImageSubresourcePair imgpair, VkImageLayout layout) {
auto it = imageLayoutMap.find(imgpair);
if (it != imageLayoutMap.end()) {
it->second.layout = layout; // Update
} else {
imageLayoutMap[imgpair].layout = layout; // Insert
}
}
bool CoreChecks::FindLayoutVerifyLayout(ImageSubresourcePair imgpair, VkImageLayout &layout, const VkImageAspectFlags aspectMask) {
if (!(imgpair.subresource.aspectMask & aspectMask)) {
return false;
}
VkImageAspectFlags oldAspectMask = imgpair.subresource.aspectMask;
imgpair.subresource.aspectMask = aspectMask;
auto imgsubIt = imageLayoutMap.find(imgpair);
if (imgsubIt == imageLayoutMap.end()) {
return false;
}
if (layout != VK_IMAGE_LAYOUT_MAX_ENUM && layout != imgsubIt->second.layout) {
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, HandleToUint64(imgpair.image),
kVUID_Core_DrawState_InvalidLayout,
"Cannot query for %s layout when combined aspect mask %d has multiple layout types: %s and %s",
report_data->FormatHandle(imgpair.image).c_str(), oldAspectMask, string_VkImageLayout(layout),
string_VkImageLayout(imgsubIt->second.layout));
}
layout = imgsubIt->second.layout;
return true;
}
// Find layout(s) on the global level
bool CoreChecks::FindGlobalLayout(ImageSubresourcePair imgpair, VkImageLayout &layout) {
layout = VK_IMAGE_LAYOUT_MAX_ENUM;
FindLayoutVerifyLayout(imgpair, layout, VK_IMAGE_ASPECT_COLOR_BIT);
FindLayoutVerifyLayout(imgpair, layout, VK_IMAGE_ASPECT_DEPTH_BIT);
FindLayoutVerifyLayout(imgpair, layout, VK_IMAGE_ASPECT_STENCIL_BIT);
FindLayoutVerifyLayout(imgpair, layout, VK_IMAGE_ASPECT_METADATA_BIT);
if (device_extensions.vk_khr_sampler_ycbcr_conversion) {
FindLayoutVerifyLayout(imgpair, layout, VK_IMAGE_ASPECT_PLANE_0_BIT_KHR);
FindLayoutVerifyLayout(imgpair, layout, VK_IMAGE_ASPECT_PLANE_1_BIT_KHR);
FindLayoutVerifyLayout(imgpair, layout, VK_IMAGE_ASPECT_PLANE_2_BIT_KHR);
}
if (layout == VK_IMAGE_LAYOUT_MAX_ENUM) {
imgpair = {imgpair.image, false, VkImageSubresource()};
auto imgsubIt = imageLayoutMap.find(imgpair);
if (imgsubIt == imageLayoutMap.end()) return false;
layout = imgsubIt->second.layout;
}
return true;
}
bool CoreChecks::FindLayouts(VkImage image, std::vector<VkImageLayout> &layouts) const {
auto sub_data = imageSubresourceMap.find(image);
if (sub_data == imageSubresourceMap.end()) return false;
auto image_state = GetImageState(image);
if (!image_state) return false;
bool ignoreGlobal = false;
// TODO: Make this robust for >1 aspect mask. For now it just ignores potential errors in this case.
if (sub_data->second.size() >= (image_state->createInfo.arrayLayers * image_state->createInfo.mipLevels + 1)) {
ignoreGlobal = true;
}
for (auto imgsubpair : sub_data->second) {
if (ignoreGlobal && !imgsubpair.hasSubresource) continue;
auto img_data = imageLayoutMap.find(imgsubpair);
if (img_data != imageLayoutMap.end()) {
layouts.push_back(img_data->second.layout);
}
}
return true;
}
bool CoreChecks::FindLayout(const ImageSubresPairLayoutMap &imageLayoutMap, ImageSubresourcePair imgpair, VkImageLayout &layout,
const VkImageAspectFlags aspectMask) {
if (!(imgpair.subresource.aspectMask & aspectMask)) {
return false;
}
imgpair.subresource.aspectMask = aspectMask;
auto imgsubIt = imageLayoutMap.find(imgpair);
if (imgsubIt == imageLayoutMap.end()) {
return false;
}
layout = imgsubIt->second.layout;
return true;
}
// find layout in supplied map
bool CoreChecks::FindLayout(const ImageSubresPairLayoutMap &imageLayoutMap, ImageSubresourcePair imgpair,
VkImageLayout &layout) const {
layout = VK_IMAGE_LAYOUT_MAX_ENUM;
FindLayout(imageLayoutMap, imgpair, layout, VK_IMAGE_ASPECT_COLOR_BIT);
FindLayout(imageLayoutMap, imgpair, layout, VK_IMAGE_ASPECT_DEPTH_BIT);
FindLayout(imageLayoutMap, imgpair, layout, VK_IMAGE_ASPECT_STENCIL_BIT);
FindLayout(imageLayoutMap, imgpair, layout, VK_IMAGE_ASPECT_METADATA_BIT);
if (device_extensions.vk_khr_sampler_ycbcr_conversion) {
FindLayout(imageLayoutMap, imgpair, layout, VK_IMAGE_ASPECT_PLANE_0_BIT_KHR);
FindLayout(imageLayoutMap, imgpair, layout, VK_IMAGE_ASPECT_PLANE_1_BIT_KHR);
FindLayout(imageLayoutMap, imgpair, layout, VK_IMAGE_ASPECT_PLANE_2_BIT_KHR);
}
// Image+subresource not found, look for image handle w/o subresource
if (layout == VK_IMAGE_LAYOUT_MAX_ENUM) {
imgpair = {imgpair.image, false, VkImageSubresource()};
auto imgsubIt = imageLayoutMap.find(imgpair);
if (imgsubIt == imageLayoutMap.end()) return false;
layout = imgsubIt->second.layout;
}
return true;
}
// Set the layout on the global level
void CoreChecks::SetGlobalLayout(ImageSubresourcePair imgpair, const VkImageLayout &layout) {
VkImage &image = imgpair.image;
auto data = imageLayoutMap.find(imgpair);
if (data != imageLayoutMap.end()) {
data->second.layout = layout; // Update
} else {
imageLayoutMap[imgpair].layout = layout; // Insert
}
auto &image_subresources = imageSubresourceMap[image];
auto subresource = std::find(image_subresources.begin(), image_subresources.end(), imgpair);
if (subresource == image_subresources.end()) {
image_subresources.push_back(imgpair);
}
}
// Set image layout for given VkImageSubresourceRange struct
void CoreChecks::SetImageLayout(CMD_BUFFER_STATE *cb_node, const IMAGE_STATE &image_state,
const VkImageSubresourceRange &image_subresource_range, VkImageLayout layout,
VkImageLayout expected_layout) {
auto *subresource_map = GetImageSubresourceLayoutMap(cb_node, image_state);
assert(subresource_map); // the non-const getter must return a valid pointer
if (subresource_map->SetSubresourceRangeLayout(*cb_node, image_subresource_range, layout, expected_layout)) {
cb_node->image_layout_change_count++; // Change the version of this data to force revalidation
}
}
// Set the initial image layout for all slices of an image view
void CoreChecks::SetImageViewInitialLayout(CMD_BUFFER_STATE *cb_node, const IMAGE_VIEW_STATE &view_state, VkImageLayout layout) {
if (disabled.image_layout_validation) {
return;
}
IMAGE_STATE *image_state = view_state.image_state.get();
auto *subresource_map = GetImageSubresourceLayoutMap(cb_node, *image_state);
subresource_map->SetSubresourceRangeInitialLayout(*cb_node, view_state.normalized_subresource_range, layout, &view_state);
}
// Set the initial image layout for a passed non-normalized subresource range
void CoreChecks::SetImageInitialLayout(CMD_BUFFER_STATE *cb_node, const IMAGE_STATE &image_state,
const VkImageSubresourceRange &range, VkImageLayout layout) {
auto *subresource_map = GetImageSubresourceLayoutMap(cb_node, image_state);
assert(subresource_map);
subresource_map->SetSubresourceRangeInitialLayout(*cb_node, NormalizeSubresourceRange(image_state, range), layout);
}
void CoreChecks::SetImageInitialLayout(CMD_BUFFER_STATE *cb_node, VkImage image, const VkImageSubresourceRange &range,
VkImageLayout layout) {
const IMAGE_STATE *image_state = GetImageState(image);
if (!image_state) return;
SetImageInitialLayout(cb_node, *image_state, range, layout);
}
void CoreChecks::SetImageInitialLayout(CMD_BUFFER_STATE *cb_node, const IMAGE_STATE &image_state,
const VkImageSubresourceLayers &layers, VkImageLayout layout) {
SetImageInitialLayout(cb_node, image_state, RangeFromLayers(layers), layout);
}
// Set image layout for all slices of an image view
void CoreChecks::SetImageViewLayout(CMD_BUFFER_STATE *cb_node, const IMAGE_VIEW_STATE &view_state, VkImageLayout layout,
VkImageLayout layoutStencil) {
IMAGE_STATE *image_state = view_state.image_state.get();
VkImageSubresourceRange sub_range = view_state.normalized_subresource_range;
// When changing the layout of a 3D image subresource via a 2D or 2D_ARRAY image view, all depth slices of
// the subresource mip level(s) are transitioned, ignoring any layer restriction in the subresource info.
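// For example (hypothetical values): a VK_IMAGE_TYPE_3D image with extent.depth = 64 viewed through a 2D_ARRAY
// view of layers 8..15 still has all 64 depth slices transitioned for the affected mip level(s).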
if ((image_state->createInfo.imageType == VK_IMAGE_TYPE_3D) && (view_state.create_info.viewType != VK_IMAGE_VIEW_TYPE_3D)) {
sub_range.baseArrayLayer = 0;
sub_range.layerCount = image_state->createInfo.extent.depth;
}
if (sub_range.aspectMask == (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT) && layoutStencil != kInvalidLayout) {
sub_range.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT;
SetImageLayout(cb_node, *image_state, sub_range, layout);
sub_range.aspectMask = VK_IMAGE_ASPECT_STENCIL_BIT;
SetImageLayout(cb_node, *image_state, sub_range, layoutStencil);
} else {
SetImageLayout(cb_node, *image_state, sub_range, layout);
}
}
bool CoreChecks::ValidateRenderPassLayoutAgainstFramebufferImageUsage(RenderPassCreateVersion rp_version, VkImageLayout layout,
VkImage image, VkImageView image_view,
VkFramebuffer framebuffer, VkRenderPass renderpass,
uint32_t attachment_index, const char *variable_name) const {
bool skip = false;
auto image_state = GetImageState(image);
const char *vuid;
const bool use_rp2 = (rp_version == RENDER_PASS_VERSION_2);
if (!image_state) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, HandleToUint64(image),
"VUID-VkRenderPassBeginInfo-framebuffer-parameter",
"Render Pass begin with %s uses %s where pAttachments[%" PRIu32 "] = %s, which refers to an invalid image",
report_data->FormatHandle(renderpass).c_str(), report_data->FormatHandle(framebuffer).c_str(),
attachment_index, report_data->FormatHandle(image_view).c_str());
return skip;
}
auto image_usage = image_state->createInfo.usage;
// Check for layouts that mismatch image usages in the framebuffer
if (layout == VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL && !(image_usage & VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT)) {
vuid = use_rp2 ? "VUID-vkCmdBeginRenderPass2KHR-initialLayout-03094" : "VUID-vkCmdBeginRenderPass-initialLayout-00895";
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, HandleToUint64(image), vuid,
"Layout/usage mismatch for attachment %u in %s"
" - the %s is %s but the image attached to %s via %s"
" was not created with VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT",
attachment_index, report_data->FormatHandle(renderpass).c_str(), variable_name, string_VkImageLayout(layout),
report_data->FormatHandle(framebuffer).c_str(), report_data->FormatHandle(image_view).c_str());
}
if (layout == VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL &&
!(image_usage & (VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT | VK_IMAGE_USAGE_SAMPLED_BIT))) {
vuid = use_rp2 ? "VUID-vkCmdBeginRenderPass2KHR-initialLayout-03097" : "VUID-vkCmdBeginRenderPass-initialLayout-00897";
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, HandleToUint64(image), vuid,
"Layout/usage mismatch for attachment %u in %s"
" - the %s is %s but the image attached to %s via %s"
" was not created with VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT or VK_IMAGE_USAGE_SAMPLED_BIT",
attachment_index, report_data->FormatHandle(renderpass).c_str(), variable_name, string_VkImageLayout(layout),
report_data->FormatHandle(framebuffer).c_str(), report_data->FormatHandle(image_view).c_str());
}
if (layout == VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL && !(image_usage & VK_IMAGE_USAGE_TRANSFER_SRC_BIT)) {
vuid = use_rp2 ? "VUID-vkCmdBeginRenderPass2KHR-initialLayout-03098" : "VUID-vkCmdBeginRenderPass-initialLayout-00898";
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, HandleToUint64(image), vuid,
"Layout/usage mismatch for attachment %u in %s"
" - the %s is %s but the image attached to %s via %s"
" was not created with VK_IMAGE_USAGE_TRANSFER_SRC_BIT",
attachment_index, report_data->FormatHandle(renderpass).c_str(), variable_name, string_VkImageLayout(layout),
report_data->FormatHandle(framebuffer).c_str(), report_data->FormatHandle(image_view).c_str());
}
if (layout == VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL && !(image_usage & VK_IMAGE_USAGE_TRANSFER_DST_BIT)) {
vuid = use_rp2 ? "VUID-vkCmdBeginRenderPass2KHR-initialLayout-03099" : "VUID-vkCmdBeginRenderPass-initialLayout-00899";
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, HandleToUint64(image), vuid,
"Layout/usage mismatch for attachment %u in %s"
" - the %s is %s but the image attached to %s via %s"
" was not created with VK_IMAGE_USAGE_TRANSFER_DST_BIT",
attachment_index, report_data->FormatHandle(renderpass).c_str(), variable_name, string_VkImageLayout(layout),
report_data->FormatHandle(framebuffer).c_str(), report_data->FormatHandle(image_view).c_str());
}
if (device_extensions.vk_khr_maintenance2) {
if ((layout == VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL ||
layout == VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL ||
layout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL ||
layout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL) &&
!(image_usage & VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT)) {
vuid = use_rp2 ? "VUID-vkCmdBeginRenderPass2KHR-initialLayout-03096" : "VUID-vkCmdBeginRenderPass-initialLayout-01758";
skip |= log_msg(
report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, HandleToUint64(image), vuid,
"Layout/usage mismatch for attachment %u in %s"
" - the %s is %s but the image attached to %s via %s"
" was not created with VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT",
attachment_index, report_data->FormatHandle(renderpass).c_str(), variable_name, string_VkImageLayout(layout),
report_data->FormatHandle(framebuffer).c_str(), report_data->FormatHandle(image_view).c_str());
}
} else {
// The create render pass 2 extension requires maintenance 2 (the previous branch), so no vuid switch needed here.
if ((layout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL ||
layout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL) &&
!(image_usage & VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
HandleToUint64(image), "VUID-vkCmdBeginRenderPass-initialLayout-00896",
"Layout/usage mismatch for attachment %u in %s"
" - the %s is %s but the image attached to %s via %s"
" was not created with VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT",
attachment_index, report_data->FormatHandle(renderpass).c_str(), variable_name,
string_VkImageLayout(layout), report_data->FormatHandle(framebuffer).c_str(),
report_data->FormatHandle(image_view).c_str());
}
}
return skip;
}
bool CoreChecks::VerifyFramebufferAndRenderPassLayouts(RenderPassCreateVersion rp_version, const CMD_BUFFER_STATE *pCB,
const VkRenderPassBeginInfo *pRenderPassBegin,
const FRAMEBUFFER_STATE *framebuffer_state) const {
bool skip = false;
auto const pRenderPassInfo = GetRenderPassState(pRenderPassBegin->renderPass)->createInfo.ptr();
auto const &framebufferInfo = framebuffer_state->createInfo;
const VkImageView *attachments = framebufferInfo.pAttachments;
auto render_pass = GetRenderPassState(pRenderPassBegin->renderPass)->renderPass;
auto framebuffer = framebuffer_state->framebuffer;
if (pRenderPassInfo->attachmentCount != framebufferInfo.attachmentCount) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(pCB->commandBuffer), kVUID_Core_DrawState_InvalidRenderpass,
"You cannot start a render pass using a framebuffer with a different number of attachments.");
}
const auto *attachmentInfo = lvl_find_in_chain<VkRenderPassAttachmentBeginInfoKHR>(pRenderPassBegin->pNext);
if (((framebufferInfo.flags & VK_FRAMEBUFFER_CREATE_IMAGELESS_BIT_KHR) != 0) && attachmentInfo != nullptr) {
attachments = attachmentInfo->pAttachments;
}
if (attachments != nullptr) {
const auto *const_pCB = static_cast<const CMD_BUFFER_STATE *>(pCB);
for (uint32_t i = 0; i < pRenderPassInfo->attachmentCount; ++i) {
auto image_view = attachments[i];
auto view_state = GetImageViewState(image_view);
if (!view_state) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
HandleToUint64(pRenderPassBegin->renderPass), "VUID-VkRenderPassBeginInfo-framebuffer-parameter",
"vkCmdBeginRenderPass(): %s pAttachments[%" PRIu32 "] = %s is not a valid VkImageView handle",
report_data->FormatHandle(framebuffer_state->framebuffer).c_str(), i,
report_data->FormatHandle(image_view).c_str());
continue;
}
const VkImage image = view_state->create_info.image;
const IMAGE_STATE *image_state = GetImageState(image);
if (!image_state) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
HandleToUint64(pRenderPassBegin->renderPass), "VUID-VkRenderPassBeginInfo-framebuffer-parameter",
"vkCmdBeginRenderPass(): %s pAttachments[%" PRIu32 "] = %s references non-extant %s.",
report_data->FormatHandle(framebuffer_state->framebuffer).c_str(), i,
report_data->FormatHandle(image_view).c_str(), report_data->FormatHandle(image).c_str());
continue;
}
auto attachment_initial_layout = pRenderPassInfo->pAttachments[i].initialLayout;
auto final_layout = pRenderPassInfo->pAttachments[i].finalLayout;
// Use the const pCB so the const overload of GetImageSubresourceLayoutMap is selected and no new map entries
// are created here (the const getter returns nullptr if no layout data exists yet).
const ImageSubresourceLayoutMap *subresource_map =
(attachment_initial_layout != VK_IMAGE_LAYOUT_UNDEFINED) ? GetImageSubresourceLayoutMap(const_pCB, image) : nullptr;
if (subresource_map) { // If no layout information for image yet, will be checked at QueueSubmit time
LayoutUseCheckAndMessage layout_check(subresource_map);
bool subres_skip = false;
auto subresource_cb = [this, i, attachment_initial_layout, &layout_check, &subres_skip](
const VkImageSubresource &subres, VkImageLayout layout, VkImageLayout initial_layout) {
if (!layout_check.Check(subres, attachment_initial_layout, layout, initial_layout)) {
subres_skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
kVUID_Core_DrawState_InvalidRenderpass,
"You cannot start a render pass using attachment %u where the render pass initial layout is %s "
"and the %s layout of the attachment is %s. The layouts must match, or the render "
"pass initial layout for the attachment must be VK_IMAGE_LAYOUT_UNDEFINED",
i, string_VkImageLayout(attachment_initial_layout), layout_check.message,
string_VkImageLayout(layout_check.layout));
}
return !subres_skip; // quit checking subresources once we fail once
};
subresource_map->ForRange(view_state->normalized_subresource_range, subresource_cb);
skip |= subres_skip;
}
ValidateRenderPassLayoutAgainstFramebufferImageUsage(rp_version, attachment_initial_layout, image, image_view,
framebuffer, render_pass, i, "initial layout");
ValidateRenderPassLayoutAgainstFramebufferImageUsage(rp_version, final_layout, image, image_view, framebuffer,
render_pass, i, "final layout");
}
for (uint32_t j = 0; j < pRenderPassInfo->subpassCount; ++j) {
auto &subpass = pRenderPassInfo->pSubpasses[j];
for (uint32_t k = 0; k < pRenderPassInfo->pSubpasses[j].inputAttachmentCount; ++k) {
auto &attachment_ref = subpass.pInputAttachments[k];
if (attachment_ref.attachment != VK_ATTACHMENT_UNUSED) {
auto image_view = attachments[attachment_ref.attachment];
auto view_state = GetImageViewState(image_view);
if (view_state) {
auto image = view_state->create_info.image;
ValidateRenderPassLayoutAgainstFramebufferImageUsage(rp_version, attachment_ref.layout, image, image_view,
framebuffer, render_pass, attachment_ref.attachment,
"input attachment layout");
}
}
}
for (uint32_t k = 0; k < pRenderPassInfo->pSubpasses[j].colorAttachmentCount; ++k) {
auto &attachment_ref = subpass.pColorAttachments[k];
if (attachment_ref.attachment != VK_ATTACHMENT_UNUSED) {
auto image_view = attachments[attachment_ref.attachment];
auto view_state = GetImageViewState(image_view);
if (view_state) {
auto image = view_state->create_info.image;
ValidateRenderPassLayoutAgainstFramebufferImageUsage(rp_version, attachment_ref.layout, image, image_view,
framebuffer, render_pass, attachment_ref.attachment,
"color attachment layout");
if (subpass.pResolveAttachments) {
ValidateRenderPassLayoutAgainstFramebufferImageUsage(
rp_version, attachment_ref.layout, image, image_view, framebuffer, render_pass,
attachment_ref.attachment, "resolve attachment layout");
}
}
}
}
if (pRenderPassInfo->pSubpasses[j].pDepthStencilAttachment) {
auto &attachment_ref = *subpass.pDepthStencilAttachment;
if (attachment_ref.attachment != VK_ATTACHMENT_UNUSED) {
auto image_view = attachments[attachment_ref.attachment];
auto view_state = GetImageViewState(image_view);
if (view_state) {
auto image = view_state->create_info.image;
ValidateRenderPassLayoutAgainstFramebufferImageUsage(rp_version, attachment_ref.layout, image, image_view,
framebuffer, render_pass, attachment_ref.attachment,
"input attachment layout");
}
}
}
}
}
return skip;
}
void CoreChecks::TransitionAttachmentRefLayout(CMD_BUFFER_STATE *pCB, FRAMEBUFFER_STATE *pFramebuffer,
const safe_VkAttachmentReference2KHR &ref) {
if (ref.attachment != VK_ATTACHMENT_UNUSED) {
auto image_view = GetAttachmentImageViewState(pFramebuffer, ref.attachment);
if (image_view) {
VkImageLayout stencil_layout = kInvalidLayout;
const auto *attachment_reference_stencil_layout = lvl_find_in_chain<VkAttachmentReferenceStencilLayoutKHR>(ref.pNext);
if (attachment_reference_stencil_layout) {
stencil_layout = attachment_reference_stencil_layout->stencilLayout;
}
SetImageViewLayout(pCB, *image_view, ref.layout, stencil_layout);
}
}
}
void CoreChecks::TransitionSubpassLayouts(CMD_BUFFER_STATE *pCB, const RENDER_PASS_STATE *render_pass_state,
const int subpass_index, FRAMEBUFFER_STATE *framebuffer_state) {
assert(render_pass_state);
if (framebuffer_state) {
auto const &subpass = render_pass_state->createInfo.pSubpasses[subpass_index];
for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
TransitionAttachmentRefLayout(pCB, framebuffer_state, subpass.pInputAttachments[j]);
}
for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
TransitionAttachmentRefLayout(pCB, framebuffer_state, subpass.pColorAttachments[j]);
}
if (subpass.pDepthStencilAttachment) {
TransitionAttachmentRefLayout(pCB, framebuffer_state, *subpass.pDepthStencilAttachment);
}
}
}
// Transition the layout state for renderpass attachments based on the BeginRenderPass() call. This includes:
// 1. Transition into initialLayout state
// 2. Transition from initialLayout to layout used in subpass 0
void CoreChecks::TransitionBeginRenderPassLayouts(CMD_BUFFER_STATE *cb_state, const RENDER_PASS_STATE *render_pass_state,
FRAMEBUFFER_STATE *framebuffer_state) {
// First transition into initialLayout
auto const rpci = render_pass_state->createInfo.ptr();
for (uint32_t i = 0; i < rpci->attachmentCount; ++i) {
auto view_state = GetAttachmentImageViewState(framebuffer_state, i);
if (view_state) {
VkImageLayout stencil_layout = kInvalidLayout;
const auto *attachment_description_stencil_layout =
lvl_find_in_chain<VkAttachmentDescriptionStencilLayoutKHR>(rpci->pAttachments[i].pNext);
if (attachment_description_stencil_layout) {
stencil_layout = attachment_description_stencil_layout->stencilInitialLayout;
}
SetImageViewLayout(cb_state, *view_state, rpci->pAttachments[i].initialLayout, stencil_layout);
}
}
// Now transition for first subpass (index 0)
TransitionSubpassLayouts(cb_state, render_pass_state, 0, framebuffer_state);
}
bool VerifyAspectsPresent(VkImageAspectFlags aspect_mask, VkFormat format) {
if ((aspect_mask & VK_IMAGE_ASPECT_COLOR_BIT) != 0) {
if (!(FormatIsColor(format) || FormatIsMultiplane(format))) return false;
}
if ((aspect_mask & VK_IMAGE_ASPECT_DEPTH_BIT) != 0) {
if (!FormatHasDepth(format)) return false;
}
if ((aspect_mask & VK_IMAGE_ASPECT_STENCIL_BIT) != 0) {
if (!FormatHasStencil(format)) return false;
}
if (0 !=
(aspect_mask & (VK_IMAGE_ASPECT_PLANE_0_BIT_KHR | VK_IMAGE_ASPECT_PLANE_1_BIT_KHR | VK_IMAGE_ASPECT_PLANE_2_BIT_KHR))) {
if (FormatPlaneCount(format) == 1) return false;
}
return true;
}
// Verify an ImageMemoryBarrier's old/new ImageLayouts are compatible with the Image's ImageUsageFlags.
bool CoreChecks::ValidateBarrierLayoutToImageUsage(const VkImageMemoryBarrier &img_barrier, bool new_not_old,
VkImageUsageFlags usage_flags, const char *func_name,
const char *barrier_pname) const {
bool skip = false;
const VkImageLayout layout = (new_not_old) ? img_barrier.newLayout : img_barrier.oldLayout;
const char *msg_code = kVUIDUndefined; // sentinel value meaning "no error"
switch (layout) {
case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL:
if ((usage_flags & VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT) == 0) {
msg_code = "VUID-VkImageMemoryBarrier-oldLayout-01208";
}
break;
case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL:
if ((usage_flags & VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT) == 0) {
msg_code = "VUID-VkImageMemoryBarrier-oldLayout-01209";
}
break;
case VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL:
if ((usage_flags & VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT) == 0) {
msg_code = "VUID-VkImageMemoryBarrier-oldLayout-01210";
}
break;
case VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL:
if ((usage_flags & (VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT)) == 0) {
msg_code = "VUID-VkImageMemoryBarrier-oldLayout-01211";
}
break;
case VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL:
if ((usage_flags & VK_IMAGE_USAGE_TRANSFER_SRC_BIT) == 0) {
msg_code = "VUID-VkImageMemoryBarrier-oldLayout-01212";
}
break;
case VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL:
if ((usage_flags & VK_IMAGE_USAGE_TRANSFER_DST_BIT) == 0) {
msg_code = "VUID-VkImageMemoryBarrier-oldLayout-01213";
}
break;
case VK_IMAGE_LAYOUT_SHADING_RATE_OPTIMAL_NV:
if ((usage_flags & VK_IMAGE_USAGE_SHADING_RATE_IMAGE_BIT_NV) == 0) {
msg_code = "VUID-VkImageMemoryBarrier-oldLayout-02088";
}
break;
default:
// Other VkImageLayout values do not have VUs defined in this context.
break;
}
if (msg_code != kVUIDUndefined) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
HandleToUint64(img_barrier.image), msg_code,
"%s: Image barrier %s %s Layout=%s is not compatible with %s usage flags 0x%" PRIx32 ".", func_name,
barrier_pname, ((new_not_old) ? "new" : "old"), string_VkImageLayout(layout),
report_data->FormatHandle(img_barrier.image).c_str(), usage_flags);
}
return skip;
}
// Verify image barriers are compatible with the images they reference.
bool CoreChecks::ValidateBarriersToImages(const CMD_BUFFER_STATE *cb_state, uint32_t imageMemoryBarrierCount,
const VkImageMemoryBarrier *pImageMemoryBarriers, const char *func_name) const {
bool skip = false;
// Scoreboard for checking for duplicate and inconsistent barriers to images
struct ImageBarrierScoreboardEntry {
uint32_t index;
// This is designed for temporary storage within the scope of the API call. If retained storage of the barriers is
// required, copies should be made and smart or unique pointers used in some other structure (or this one refactored)
const VkImageMemoryBarrier *barrier;
};
using ImageBarrierScoreboardSubresMap = std::unordered_map<VkImageSubresourceRange, ImageBarrierScoreboardEntry>;
using ImageBarrierScoreboardImageMap = std::unordered_map<VkImage, ImageBarrierScoreboardSubresMap>;
// Scoreboard for duplicate layout transition barriers within the list
// Pointers retained in the scoreboard only have the lifetime of *this* call (i.e. within the scope of the API call)
ImageBarrierScoreboardImageMap layout_transitions;
for (uint32_t i = 0; i < imageMemoryBarrierCount; ++i) {
const auto &img_barrier = pImageMemoryBarriers[i];
const std::string barrier_pname = "pImageMemoryBarrier[" + std::to_string(i) + "]";
// Update the scoreboard of layout transitions and check for barriers affecting the same image and subresource
// TODO: higher precision could be gained by adapting the command_buffer image_layout_map logic to look for conflicts
// at a per-subresource level
if (img_barrier.oldLayout != img_barrier.newLayout) {
const ImageBarrierScoreboardEntry new_entry{i, &img_barrier};
const auto image_it = layout_transitions.find(img_barrier.image);
if (image_it != layout_transitions.end()) {
auto &subres_map = image_it->second;
auto subres_it = subres_map.find(img_barrier.subresourceRange);
if (subres_it != subres_map.end()) {
auto &entry = subres_it->second;
if ((entry.barrier->newLayout != img_barrier.oldLayout) &&
(img_barrier.oldLayout != VK_IMAGE_LAYOUT_UNDEFINED)) {
const VkImageSubresourceRange &range = img_barrier.subresourceRange;
skip |= log_msg(
report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_state->commandBuffer), "VUID-VkImageMemoryBarrier-oldLayout-01197",
"%s: %s conflicts with earlier entry pImageMemoryBarrier[%u]. %s"
" subresourceRange: aspectMask=%u baseMipLevel=%u levelCount=%u, baseArrayLayer=%u, layerCount=%u; "
"conflicting barrier transitions image layout from %s when earlier barrier transitioned to layout %s.",
func_name, barrier_pname.c_str(), entry.index, report_data->FormatHandle(img_barrier.image).c_str(),
range.aspectMask, range.baseMipLevel, range.levelCount, range.baseArrayLayer, range.layerCount,
string_VkImageLayout(img_barrier.oldLayout), string_VkImageLayout(entry.barrier->newLayout));
}
entry = new_entry;
} else {
subres_map[img_barrier.subresourceRange] = new_entry;
}
} else {
layout_transitions[img_barrier.image][img_barrier.subresourceRange] = new_entry;
}
}
auto image_state = GetImageState(img_barrier.image);
if (image_state) {
VkImageUsageFlags usage_flags = image_state->createInfo.usage;
skip |= ValidateBarrierLayoutToImageUsage(img_barrier, false, usage_flags, func_name, barrier_pname.c_str());
skip |= ValidateBarrierLayoutToImageUsage(img_barrier, true, usage_flags, func_name, barrier_pname.c_str());
// Make sure the layout can be transitioned; currently only already-presented shared presentable images are locked
if (image_state->layout_locked) {
// TODO: Add unique id for error when available
skip |= log_msg(
report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
HandleToUint64(img_barrier.image), 0,
"Attempting to transition shared presentable %s"
" from layout %s to layout %s, but image has already been presented and cannot have its layout transitioned.",
report_data->FormatHandle(img_barrier.image).c_str(), string_VkImageLayout(img_barrier.oldLayout),
string_VkImageLayout(img_barrier.newLayout));
}
const VkImageCreateInfo &image_create_info = image_state->createInfo;
// For a Depth/Stencil image both aspects MUST be set
if (FormatIsDepthAndStencil(image_create_info.format)) {
auto const aspect_mask = img_barrier.subresourceRange.aspectMask;
if (enabled_features.separate_depth_stencil_layouts_features.separateDepthStencilLayouts) {
if (!(aspect_mask & (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT))) {
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
HandleToUint64(img_barrier.image), "VUID-VkImageMemoryBarrier-image-03319",
"%s: Image barrier %s references %s of format %s that must have either the depth or stencil "
"aspects set, but its aspectMask is 0x%" PRIx32 ".",
func_name, barrier_pname.c_str(), report_data->FormatHandle(img_barrier.image).c_str(),
string_VkFormat(image_create_info.format), aspect_mask);
}
} else {
auto const ds_mask = VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT;
if ((aspect_mask & ds_mask) != (ds_mask)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
HandleToUint64(img_barrier.image), "VUID-VkImageMemoryBarrier-image-03320",
"%s: Image barrier %s references %s of format %s that must have the depth and stencil "
"aspects set, but its aspectMask is 0x%" PRIx32 ".",
func_name, barrier_pname.c_str(), report_data->FormatHandle(img_barrier.image).c_str(),
string_VkFormat(image_create_info.format), aspect_mask);
}
}
}
const auto *subresource_map = GetImageSubresourceLayoutMap(cb_state, img_barrier.image);
if (img_barrier.oldLayout == VK_IMAGE_LAYOUT_UNDEFINED) {
// TODO: Set memory invalid which is in mem_tracker currently
// Not sure if this needs to be in the ForRange traversal, pulling it out as it is currently invariant with
// subresource.
} else if (subresource_map) {
bool subres_skip = false;
LayoutUseCheckAndMessage layout_check(subresource_map);
VkImageSubresourceRange normalized_isr = NormalizeSubresourceRange(*image_state, img_barrier.subresourceRange);
auto subres_callback = [this, img_barrier, cb_state, &layout_check, &subres_skip](
const VkImageSubresource &subres, VkImageLayout layout, VkImageLayout initial_layout) {
if (!layout_check.Check(subres, img_barrier.oldLayout, layout, initial_layout)) {
subres_skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_state->commandBuffer), "VUID-VkImageMemoryBarrier-oldLayout-01197",
"For %s you cannot transition the layout of aspect=%d level=%d layer=%d from %s when the "
"%s layout is %s.",
report_data->FormatHandle(img_barrier.image).c_str(), subres.aspectMask, subres.mipLevel,
subres.arrayLayer, string_VkImageLayout(img_barrier.oldLayout), layout_check.message,
string_VkImageLayout(layout_check.layout));
}
return !subres_skip;
};
subresource_map->ForRange(normalized_isr, subres_callback);
skip |= subres_skip;
}
}
}
return skip;
}
bool CoreChecks::IsReleaseOp(CMD_BUFFER_STATE *cb_state, const VkImageMemoryBarrier &barrier) const {
if (!IsTransferOp(&barrier)) return false;
auto pool = cb_state->command_pool.get();
return pool && TempIsReleaseOp<VkImageMemoryBarrier, true>(pool, &barrier);
}
template <typename Barrier>
bool CoreChecks::ValidateQFOTransferBarrierUniqueness(const char *func_name, const CMD_BUFFER_STATE *cb_state,
uint32_t barrier_count, const Barrier *barriers) const {
using BarrierRecord = QFOTransferBarrier<Barrier>;
bool skip = false;
auto pool = cb_state->command_pool.get();
auto &barrier_sets = GetQFOBarrierSets(cb_state, typename BarrierRecord::Tag());
const char *barrier_name = BarrierRecord::BarrierName();
const char *handle_name = BarrierRecord::HandleName();
const char *transfer_type = nullptr;
for (uint32_t b = 0; b < barrier_count; b++) {
if (!IsTransferOp(&barriers[b])) continue;
const BarrierRecord *barrier_record = nullptr;
if (TempIsReleaseOp<Barrier, true /* Assume IsTransfer */>(pool, &barriers[b]) &&
!QueueFamilyIsSpecial(barriers[b].dstQueueFamilyIndex)) {
const auto found = barrier_sets.release.find(barriers[b]);
if (found != barrier_sets.release.cend()) {
barrier_record = &(*found);
transfer_type = "releasing";
}
} else if (IsAcquireOp<Barrier, true /*Assume IsTransfer */>(pool, &barriers[b]) &&
!QueueFamilyIsSpecial(barriers[b].srcQueueFamilyIndex)) {
const auto found = barrier_sets.acquire.find(barriers[b]);
if (found != barrier_sets.acquire.cend()) {
barrier_record = &(*found);
transfer_type = "acquiring";
}
}
if (barrier_record != nullptr) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_state->commandBuffer), BarrierRecord::ErrMsgDuplicateQFOInCB(),
"%s: %s at index %" PRIu32 " %s queue ownership of %s (%s), from srcQueueFamilyIndex %" PRIu32
" to dstQueueFamilyIndex %" PRIu32 " duplicates existing barrier recorded in this command buffer.",
func_name, barrier_name, b, transfer_type, handle_name,
report_data->FormatHandle(barrier_record->handle).c_str(), barrier_record->srcQueueFamilyIndex,
barrier_record->dstQueueFamilyIndex);
}
}
return skip;
}
VulkanTypedHandle BarrierTypedHandle(const VkImageMemoryBarrier &barrier) {
return VulkanTypedHandle(barrier.image, kVulkanObjectTypeImage);
}
const IMAGE_STATE *BarrierHandleState(const ValidationStateTracker &device_state, const VkImageMemoryBarrier &barrier) {
return device_state.GetImageState(barrier.image);
}
VulkanTypedHandle BarrierTypedHandle(const VkBufferMemoryBarrier &barrier) {
return VulkanTypedHandle(barrier.buffer, kVulkanObjectTypeBuffer);
}
const BUFFER_STATE *BarrierHandleState(const ValidationStateTracker &device_state, const VkBufferMemoryBarrier &barrier) {
return device_state.GetBufferState(barrier.buffer);
}
VkBuffer BarrierHandle(const VkBufferMemoryBarrier &barrier) { return barrier.buffer; }
template <typename Barrier>
void CoreChecks::RecordBarrierArrayValidationInfo(const char *func_name, CMD_BUFFER_STATE *cb_state, uint32_t barrier_count,
const Barrier *barriers) {
auto pool = cb_state->command_pool.get();
auto &barrier_sets = GetQFOBarrierSets(cb_state, typename QFOTransferBarrier<Barrier>::Tag());
for (uint32_t b = 0; b < barrier_count; b++) {
auto &barrier = barriers[b];
if (IsTransferOp(&barrier)) {
if (TempIsReleaseOp<Barrier, true /* Assume IsTransfer*/>(pool, &barrier) &&
!QueueFamilyIsSpecial(barrier.dstQueueFamilyIndex)) {
barrier_sets.release.emplace(barrier);
} else if (IsAcquireOp<Barrier, true /*Assume IsTransfer */>(pool, &barrier) &&
!QueueFamilyIsSpecial(barrier.srcQueueFamilyIndex)) {
barrier_sets.acquire.emplace(barrier);
}
}
const uint32_t src_queue_family = barrier.srcQueueFamilyIndex;
const uint32_t dst_queue_family = barrier.dstQueueFamilyIndex;
if (!QueueFamilyIsIgnored(src_queue_family) && !QueueFamilyIsIgnored(dst_queue_family)) {
// Only enqueue submit time check if it is needed. If more submit time checks are added, change the criteria
// TODO create a better named list, or rename the submit time lists to something that matches the broader usage...
auto handle_state = BarrierHandleState(*this, barrier);
bool mode_concurrent = handle_state ? handle_state->createInfo.sharingMode == VK_SHARING_MODE_CONCURRENT : false;
if (!mode_concurrent) {
const auto typed_handle = BarrierTypedHandle(barrier);
cb_state->queue_submit_functions.emplace_back(
[func_name, cb_state, typed_handle, src_queue_family, dst_queue_family](
const ValidationStateTracker *device_data, const QUEUE_STATE *queue_state) {
return ValidateConcurrentBarrierAtSubmit(device_data, queue_state, func_name, cb_state, typed_handle,
src_queue_family, dst_queue_family);
});
}
}
}
}
bool CoreChecks::ValidateBarriersQFOTransferUniqueness(const char *func_name, const CMD_BUFFER_STATE *cb_state,
uint32_t bufferBarrierCount, const VkBufferMemoryBarrier *pBufferMemBarriers,
uint32_t imageMemBarrierCount,
const VkImageMemoryBarrier *pImageMemBarriers) const {
bool skip = false;
skip |= ValidateQFOTransferBarrierUniqueness(func_name, cb_state, bufferBarrierCount, pBufferMemBarriers);
skip |= ValidateQFOTransferBarrierUniqueness(func_name, cb_state, imageMemBarrierCount, pImageMemBarriers);
return skip;
}
void CoreChecks::RecordBarrierValidationInfo(const char *func_name, CMD_BUFFER_STATE *cb_state, uint32_t bufferBarrierCount,
const VkBufferMemoryBarrier *pBufferMemBarriers, uint32_t imageMemBarrierCount,
const VkImageMemoryBarrier *pImageMemBarriers) {
RecordBarrierArrayValidationInfo(func_name, cb_state, bufferBarrierCount, pBufferMemBarriers);
RecordBarrierArrayValidationInfo(func_name, cb_state, imageMemBarrierCount, pImageMemBarriers);
}
template <typename BarrierRecord, typename Scoreboard>
bool CoreChecks::ValidateAndUpdateQFOScoreboard(const debug_report_data *report_data, const CMD_BUFFER_STATE *cb_state,
const char *operation, const BarrierRecord &barrier, Scoreboard *scoreboard) const {
// Record to the scoreboard or report that we have a duplication
bool skip = false;
auto inserted = scoreboard->insert(std::make_pair(barrier, cb_state));
if (!inserted.second && inserted.first->second != cb_state) {
// This is a duplication (but don't report duplicates from the same CB, as we do that at record time)
skip = log_msg(report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_state->commandBuffer), BarrierRecord::ErrMsgDuplicateQFOInSubmit(),
"%s: %s %s queue ownership of %s (%s), from srcQueueFamilyIndex %" PRIu32 " to dstQueueFamilyIndex %" PRIu32
" duplicates existing barrier submitted in this batch from %s.",
"vkQueueSubmit()", BarrierRecord::BarrierName(), operation, BarrierRecord::HandleName(),
report_data->FormatHandle(barrier.handle).c_str(), barrier.srcQueueFamilyIndex, barrier.dstQueueFamilyIndex,
report_data->FormatHandle(inserted.first->second->commandBuffer).c_str());
}
return skip;
}
template <typename Barrier>
bool CoreChecks::ValidateQueuedQFOTransferBarriers(const CMD_BUFFER_STATE *cb_state,
QFOTransferCBScoreboards<Barrier> *scoreboards) const {
using BarrierRecord = QFOTransferBarrier<Barrier>;
using TypeTag = typename BarrierRecord::Tag;
bool skip = false;
const auto &cb_barriers = GetQFOBarrierSets(cb_state, TypeTag());
const GlobalQFOTransferBarrierMap<Barrier> &global_release_barriers = GetGlobalQFOReleaseBarrierMap(TypeTag());
const char *barrier_name = BarrierRecord::BarrierName();
const char *handle_name = BarrierRecord::HandleName();
// No release should have an extant duplicate (WARNING)
for (const auto &release : cb_barriers.release) {
// Check the global pending release barriers
const auto set_it = global_release_barriers.find(release.handle);
if (set_it != global_release_barriers.cend()) {
const QFOTransferBarrierSet<Barrier> &set_for_handle = set_it->second;
const auto found = set_for_handle.find(release);
if (found != set_for_handle.cend()) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_state->commandBuffer), BarrierRecord::ErrMsgDuplicateQFOSubmitted(),
"%s: %s releasing queue ownership of %s (%s), from srcQueueFamilyIndex %" PRIu32
" to dstQueueFamilyIndex %" PRIu32
" duplicates existing barrier queued for execution, without intervening acquire operation.",
"vkQueueSubmit()", barrier_name, handle_name, report_data->FormatHandle(found->handle).c_str(),
found->srcQueueFamilyIndex, found->dstQueueFamilyIndex);
}
}
skip |= ValidateAndUpdateQFOScoreboard(report_data, cb_state, "releasing", release, &scoreboards->release);
}
// Each acquire must have a matching release (ERROR)
for (const auto &acquire : cb_barriers.acquire) {
const auto set_it = global_release_barriers.find(acquire.handle);
bool matching_release_found = false;
if (set_it != global_release_barriers.cend()) {
const QFOTransferBarrierSet<Barrier> &set_for_handle = set_it->second;
matching_release_found = set_for_handle.find(acquire) != set_for_handle.cend();
}
if (!matching_release_found) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_state->commandBuffer), BarrierRecord::ErrMsgMissingQFOReleaseInSubmit(),
"%s: in submitted command buffer %s acquiring ownership of %s (%s), from srcQueueFamilyIndex %" PRIu32
" to dstQueueFamilyIndex %" PRIu32 " has no matching release barrier queued for execution.",
"vkQueueSubmit()", barrier_name, handle_name, report_data->FormatHandle(acquire.handle).c_str(),
acquire.srcQueueFamilyIndex, acquire.dstQueueFamilyIndex);
}
skip |= ValidateAndUpdateQFOScoreboard(report_data, cb_state, "acquiring", acquire, &scoreboards->acquire);
}
return skip;
}
bool CoreChecks::ValidateQueuedQFOTransfers(const CMD_BUFFER_STATE *cb_state,
QFOTransferCBScoreboards<VkImageMemoryBarrier> *qfo_image_scoreboards,
QFOTransferCBScoreboards<VkBufferMemoryBarrier> *qfo_buffer_scoreboards) const {
bool skip = false;
skip |= ValidateQueuedQFOTransferBarriers<VkImageMemoryBarrier>(cb_state, qfo_image_scoreboards);
skip |= ValidateQueuedQFOTransferBarriers<VkBufferMemoryBarrier>(cb_state, qfo_buffer_scoreboards);
return skip;
}
template <typename Barrier>
void CoreChecks::RecordQueuedQFOTransferBarriers(CMD_BUFFER_STATE *cb_state) {
using BarrierRecord = QFOTransferBarrier<Barrier>;
using TypeTag = typename BarrierRecord::Tag;
const auto &cb_barriers = GetQFOBarrierSets(cb_state, TypeTag());
GlobalQFOTransferBarrierMap<Barrier> &global_release_barriers = GetGlobalQFOReleaseBarrierMap(TypeTag());
// Add release barriers from this submit to the global map
for (const auto &release : cb_barriers.release) {
// the global barrier list is mapped by resource handle to allow cleanup on resource destruction
// NOTE: We're using [] because creation of a Set is a needed side effect for new handles
global_release_barriers[release.handle].insert(release);
}
// Erase acquired barriers from this submit from the global map -- essentially marking releases as consumed
for (const auto &acquire : cb_barriers.acquire) {
// NOTE: We're not using [] because we don't want to create entries for missing releases
auto set_it = global_release_barriers.find(acquire.handle);
if (set_it != global_release_barriers.end()) {
QFOTransferBarrierSet<Barrier> &set_for_handle = set_it->second;
set_for_handle.erase(acquire);
if (set_for_handle.size() == 0) { // Clean up empty sets
global_release_barriers.erase(set_it);
}
}
}
}
void CoreChecks::RecordQueuedQFOTransfers(CMD_BUFFER_STATE *cb_state) {
RecordQueuedQFOTransferBarriers<VkImageMemoryBarrier>(cb_state);
RecordQueuedQFOTransferBarriers<VkBufferMemoryBarrier>(cb_state);
}
// Avoid making the template globally visible by exporting the one instance of it we need.
void CoreChecks::EraseQFOImageRelaseBarriers(const VkImage &image) { EraseQFOReleaseBarriers<VkImageMemoryBarrier>(image); }
void CoreChecks::TransitionImageLayouts(CMD_BUFFER_STATE *cb_state, uint32_t memBarrierCount,
const VkImageMemoryBarrier *pImgMemBarriers) {
for (uint32_t i = 0; i < memBarrierCount; ++i) {
const auto &mem_barrier = pImgMemBarriers[i];
// For ownership transfers, the barrier is specified twice; as a release
// operation on the yielding queue family, and as an acquire operation
// on the acquiring queue family. This barrier may also include a layout
// transition, which occurs 'between' the two operations. For validation
// purposes it doesn't seem important which side performs the layout
// transition, but it must not be performed twice. We'll arbitrarily
// choose to perform it as part of the acquire operation.
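// For example (hypothetical values): an image released from queue family 0 to queue family 2 is barriered twice
// with srcQueueFamilyIndex = 0 and dstQueueFamilyIndex = 2: once in a command buffer allocated from a family-0
// pool (the release, skipped below) and once in a family-2 command buffer (the acquire, where the layout
// transition is recorded).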
if (IsReleaseOp(cb_state, mem_barrier)) {
continue;
}
auto *image_state = GetImageState(mem_barrier.image);
if (!image_state) continue;
RecordTransitionImageLayout(cb_state, image_state, mem_barrier);
for (const auto &image : image_state->aliasing_images) {
image_state = GetImageState(image);
RecordTransitionImageLayout(cb_state, image_state, mem_barrier);
}
}
}
void CoreChecks::RecordTransitionImageLayout(CMD_BUFFER_STATE *cb_state, const IMAGE_STATE *image_state,
const VkImageMemoryBarrier &mem_barrier) {
VkImageSubresourceRange normalized_isr = NormalizeSubresourceRange(*image_state, mem_barrier.subresourceRange);
const auto &image_create_info = image_state->createInfo;
// Special case for 3D images with VK_IMAGE_CREATE_2D_ARRAY_COMPATIBLE_BIT_KHR flag bit, where <extent.depth> and
// <arrayLayers> can potentially alias. When recording layout for the entire image, pre-emptively record layouts
// for all (potential) layer sub_resources.
if (0 != (image_create_info.flags & VK_IMAGE_CREATE_2D_ARRAY_COMPATIBLE_BIT_KHR)) {
normalized_isr.baseArrayLayer = 0;
normalized_isr.layerCount = image_create_info.extent.depth; // Treat each depth slice as a layer subresource
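// For example, a 3D image created with extent.depth == 8 and this flag can later be viewed as a
// 2D array with 8 layers, so layouts are recorded here for "layers" 0..7 even though arrayLayers == 1.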
}
SetImageLayout(cb_state, *image_state, normalized_isr, mem_barrier.newLayout, mem_barrier.oldLayout);
}
bool CoreChecks::VerifyImageLayout(const CMD_BUFFER_STATE *cb_node, const IMAGE_STATE *image_state,
const VkImageSubresourceRange &range, VkImageAspectFlags aspect_mask,
VkImageLayout explicit_layout, VkImageLayout optimal_layout, const char *caller,
const char *layout_invalid_msg_code, const char *layout_mismatch_msg_code, bool *error) const {
if (disabled.image_layout_validation) return false;
assert(cb_node);
assert(image_state);
const auto image = image_state->image;
bool skip = false;
const auto *subresource_map = GetImageSubresourceLayoutMap(cb_node, image);
if (subresource_map) {
bool subres_skip = false;
LayoutUseCheckAndMessage layout_check(subresource_map, aspect_mask);
auto subresource_cb = [this, explicit_layout, cb_node, layout_mismatch_msg_code, caller, image, &layout_check, &error,
&subres_skip](const VkImageSubresource &subres, VkImageLayout layout, VkImageLayout initial_layout) {
if (!layout_check.Check(subres, explicit_layout, layout, initial_layout)) {
*error = true;
subres_skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_node->commandBuffer), layout_mismatch_msg_code,
"%s: Cannot use %s (layer=%u mip=%u) with specific layout %s that doesn't match the "
"%s layout %s.",
caller, report_data->FormatHandle(image).c_str(), subres.arrayLayer, subres.mipLevel,
string_VkImageLayout(explicit_layout), layout_check.message, string_VkImageLayout(layout_check.layout));
}
return !subres_skip;
};
subresource_map->ForRange(range, subresource_cb);
skip |= subres_skip;
}
// If optimal_layout is not UNDEFINED, check that layout matches optimal for this case
if ((VK_IMAGE_LAYOUT_UNDEFINED != optimal_layout) && (explicit_layout != optimal_layout)) {
if (VK_IMAGE_LAYOUT_GENERAL == explicit_layout) {
if (image_state->createInfo.tiling != VK_IMAGE_TILING_LINEAR) {
// LAYOUT_GENERAL is allowed, but may not be performance optimal, flag as perf warning.
skip |= log_msg(report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(cb_node->commandBuffer),
kVUID_Core_DrawState_InvalidImageLayout,
"%s: For optimal performance %s layout should be %s instead of GENERAL.", caller,
report_data->FormatHandle(image).c_str(), string_VkImageLayout(optimal_layout));
}
} else if (device_extensions.vk_khr_shared_presentable_image) {
if (image_state->shared_presentable) {
if (VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR != explicit_layout) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
layout_invalid_msg_code,
"Layout for shared presentable image is %s but must be VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR.",
string_VkImageLayout(explicit_layout));
}
}
} else {
*error = true;
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_node->commandBuffer), layout_invalid_msg_code,
"%s: Layout for %s is %s but can only be %s or VK_IMAGE_LAYOUT_GENERAL.", caller,
report_data->FormatHandle(image).c_str(), string_VkImageLayout(explicit_layout),
string_VkImageLayout(optimal_layout));
}
}
return skip;
}
bool CoreChecks::VerifyImageLayout(const CMD_BUFFER_STATE *cb_node, const IMAGE_STATE *image_state,
const VkImageSubresourceLayers &subLayers, VkImageLayout explicit_layout,
VkImageLayout optimal_layout, const char *caller, const char *layout_invalid_msg_code,
const char *layout_mismatch_msg_code, bool *error) const {
return VerifyImageLayout(cb_node, image_state, RangeFromLayers(subLayers), explicit_layout, optimal_layout, caller,
layout_invalid_msg_code, layout_mismatch_msg_code, error);
}
void CoreChecks::TransitionFinalSubpassLayouts(CMD_BUFFER_STATE *pCB, const VkRenderPassBeginInfo *pRenderPassBegin,
FRAMEBUFFER_STATE *framebuffer_state) {
auto renderPass = GetRenderPassState(pRenderPassBegin->renderPass);
if (!renderPass) return;
const VkRenderPassCreateInfo2KHR *pRenderPassInfo = renderPass->createInfo.ptr();
if (framebuffer_state) {
for (uint32_t i = 0; i < pRenderPassInfo->attachmentCount; ++i) {
auto view_state = GetAttachmentImageViewState(framebuffer_state, i);
if (view_state) {
VkImageLayout stencil_layout = kInvalidLayout;
const auto *attachment_description_stencil_layout =
lvl_find_in_chain<VkAttachmentDescriptionStencilLayoutKHR>(pRenderPassInfo->pAttachments[i].pNext);
if (attachment_description_stencil_layout) {
stencil_layout = attachment_description_stencil_layout->stencilFinalLayout;
}
SetImageViewLayout(pCB, *view_state, pRenderPassInfo->pAttachments[i].finalLayout, stencil_layout);
}
}
}
}
#ifdef VK_USE_PLATFORM_ANDROID_KHR
// Android-specific validation that uses types defined only with VK_USE_PLATFORM_ANDROID_KHR
// This could also move into a separate core_validation_android.cpp file... ?
//
// AHB-specific validation within non-AHB APIs
//
bool CoreChecks::ValidateCreateImageANDROID(const debug_report_data *report_data, const VkImageCreateInfo *create_info) const {
bool skip = false;
const VkExternalFormatANDROID *ext_fmt_android = lvl_find_in_chain<VkExternalFormatANDROID>(create_info->pNext);
if (ext_fmt_android) {
if (0 != ext_fmt_android->externalFormat) {
if (VK_FORMAT_UNDEFINED != create_info->format) {
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkImageCreateInfo-pNext-01974",
"vkCreateImage(): VkImageCreateInfo struct has a chained VkExternalFormatANDROID struct with non-zero "
"externalFormat, but the VkImageCreateInfo's format is not VK_FORMAT_UNDEFINED.");
}
if (0 != (VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT & create_info->flags)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkImageCreateInfo-pNext-02396",
"vkCreateImage(): VkImageCreateInfo struct has a chained VkExternalFormatANDROID struct with "
"non-zero externalFormat, but flags include VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT.");
}
if (0 != (~VK_IMAGE_USAGE_SAMPLED_BIT & create_info->usage)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkImageCreateInfo-pNext-02397",
"vkCreateImage(): VkImageCreateInfo struct has a chained VkExternalFormatANDROID struct with "
"non-zero externalFormat, but usage includes bits other than VK_IMAGE_USAGE_SAMPLED_BIT.");
}
if (VK_IMAGE_TILING_OPTIMAL != create_info->tiling) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkImageCreateInfo-pNext-02398",
"vkCreateImage(): VkImageCreateInfo struct has a chained VkExternalFormatANDROID struct with "
"non-zero externalFormat, but layout is not VK_IMAGE_TILING_OPTIMAL.");
}
}
if ((0 != ext_fmt_android->externalFormat) && (0 == ahb_ext_formats_set.count(ext_fmt_android->externalFormat))) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkExternalFormatANDROID-externalFormat-01894",
"vkCreateImage(): Chained VkExternalFormatANDROID struct contains a non-zero externalFormat which has "
"not been previously retrieved by vkGetAndroidHardwareBufferPropertiesANDROID().");
}
}
if ((nullptr == ext_fmt_android) || (0 == ext_fmt_android->externalFormat)) {
if (VK_FORMAT_UNDEFINED == create_info->format) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkImageCreateInfo-pNext-01975",
"vkCreateImage(): VkImageCreateInfo struct's format is VK_FORMAT_UNDEFINED, but either does not have a "
"chained VkExternalFormatANDROID struct or the struct exists but has an externalFormat of 0.");
}
}
const VkExternalMemoryImageCreateInfo *emici = lvl_find_in_chain<VkExternalMemoryImageCreateInfo>(create_info->pNext);
if (emici && (emici->handleTypes & VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID)) {
if (create_info->imageType != VK_IMAGE_TYPE_2D) {
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkImageCreateInfo-pNext-02393",
"vkCreateImage(): VkImageCreateInfo struct with imageType %s has chained VkExternalMemoryImageCreateInfo "
"struct with handleType VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID.",
string_VkImageType(create_info->imageType));
}
if ((create_info->mipLevels != 1) && (create_info->mipLevels != FullMipChainLevels(create_info->extent))) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkImageCreateInfo-pNext-02394",
"vkCreateImage(): VkImageCreateInfo struct with chained VkExternalMemoryImageCreateInfo struct of "
"handleType VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID "
"specifies mipLevels = %" PRId32 " (full chain mipLevels are %" PRId32 ").",
create_info->mipLevels, FullMipChainLevels(create_info->extent));
}
}
return skip;
}
bool CoreChecks::ValidateCreateImageViewANDROID(const VkImageViewCreateInfo *create_info) const {
bool skip = false;
const IMAGE_STATE *image_state = GetImageState(create_info->image);
if (image_state && image_state->has_ahb_format) {
if (VK_FORMAT_UNDEFINED != create_info->format) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
HandleToUint64(create_info->image), "VUID-VkImageViewCreateInfo-image-02399",
"vkCreateImageView(): VkImageViewCreateInfo struct has a chained VkExternalFormatANDROID struct, but "
"format member is %s.",
string_VkFormat(create_info->format));
}
// Chain must include a compatible ycbcr conversion
bool conv_found = false;
uint64_t external_format = 0;
const VkSamplerYcbcrConversionInfo *ycbcr_conv_info = lvl_find_in_chain<VkSamplerYcbcrConversionInfo>(create_info->pNext);
if (ycbcr_conv_info != nullptr) {
VkSamplerYcbcrConversion conv_handle = ycbcr_conv_info->conversion;
if (ycbcr_conversion_ahb_fmt_map.find(conv_handle) != ycbcr_conversion_ahb_fmt_map.end()) {
conv_found = true;
external_format = ycbcr_conversion_ahb_fmt_map.at(conv_handle);
}
}
if ((!conv_found) || (external_format != image_state->ahb_format)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
HandleToUint64(create_info->image), "VUID-VkImageViewCreateInfo-image-02400",
"vkCreateImageView(): VkImageViewCreateInfo struct has a chained VkExternalFormatANDROID struct, but "
"without a chained VkSamplerYcbcrConversionInfo struct with the same external format.");
}
// Errors in create_info swizzles
if ((create_info->components.r != VK_COMPONENT_SWIZZLE_IDENTITY) ||
(create_info->components.g != VK_COMPONENT_SWIZZLE_IDENTITY) ||
(create_info->components.b != VK_COMPONENT_SWIZZLE_IDENTITY) ||
(create_info->components.a != VK_COMPONENT_SWIZZLE_IDENTITY)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
HandleToUint64(create_info->image), "VUID-VkImageViewCreateInfo-image-02401",
"vkCreateImageView(): VkImageViewCreateInfo struct has a chained VkExternalFormatANDROID struct, but "
"includes one or more non-identity component swizzles.");
}
}
return skip;
}
bool CoreChecks::ValidateGetImageSubresourceLayoutANDROID(const VkImage image) const {
bool skip = false;
const IMAGE_STATE *image_state = GetImageState(image);
if (image_state && image_state->imported_ahb && (0 == image_state->GetBoundMemory().size())) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, HandleToUint64(image),
"VUID-vkGetImageSubresourceLayout-image-01895",
"vkGetImageSubresourceLayout(): Attempt to query layout from an image created with "
"VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID handleType which has not yet been "
"bound to memory.");
}
return skip;
}
#else
bool CoreChecks::ValidateCreateImageANDROID(const debug_report_data *report_data, const VkImageCreateInfo *create_info) const {
return false;
}
bool CoreChecks::ValidateCreateImageViewANDROID(const VkImageViewCreateInfo *create_info) const { return false; }
bool CoreChecks::ValidateGetImageSubresourceLayoutANDROID(const VkImage image) const { return false; }
#endif // VK_USE_PLATFORM_ANDROID_KHR
bool CoreChecks::PreCallValidateCreateImage(VkDevice device, const VkImageCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkImage *pImage) const {
bool skip = false;
if (device_extensions.vk_android_external_memory_android_hardware_buffer) {
skip |= ValidateCreateImageANDROID(report_data, pCreateInfo);
} else { // These checks are omitted or replaced when Android HW Buffer extension is active
if (pCreateInfo->format == VK_FORMAT_UNDEFINED) {
return log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkImageCreateInfo-format-00943",
"vkCreateImage(): VkFormat for image must not be VK_FORMAT_UNDEFINED.");
}
}
if (pCreateInfo->flags & VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT) {
if (VK_IMAGE_TYPE_2D != pCreateInfo->imageType) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkImageCreateInfo-flags-00949",
"vkCreateImage(): Image type must be VK_IMAGE_TYPE_2D when VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT "
"flag bit is set");
}
if ((pCreateInfo->extent.width != pCreateInfo->extent.height) || (pCreateInfo->arrayLayers < 6)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkImageCreateInfo-imageType-00954",
"vkCreateImage(): If VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT flag bit is set, width (%d) must equal "
"height (%d) and arrayLayers (%d) must be >= 6.",
pCreateInfo->extent.width, pCreateInfo->extent.height, pCreateInfo->arrayLayers);
}
}
const VkPhysicalDeviceLimits *device_limits = &phys_dev_props.limits;
VkImageUsageFlags attach_flags = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT |
VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT | VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT;
if ((pCreateInfo->usage & attach_flags) && (pCreateInfo->extent.width > device_limits->maxFramebufferWidth)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkImageCreateInfo-usage-00964",
"vkCreateImage(): Image usage flags include a frame buffer attachment bit and image width exceeds device "
"maxFramebufferWidth.");
}
if ((pCreateInfo->usage & attach_flags) && (pCreateInfo->extent.height > device_limits->maxFramebufferHeight)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkImageCreateInfo-usage-00965",
"vkCreateImage(): Image usage flags include a frame buffer attachment bit and image height exceeds device "
"maxFramebufferHeight");
}
if (device_extensions.vk_ext_fragment_density_map) {
uint32_t ceiling_width =
(uint32_t)ceil((float)device_limits->maxFramebufferWidth /
std::max((float)phys_dev_ext_props.fragment_density_map_props.minFragmentDensityTexelSize.width, 1.0f));
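// For example, with a hypothetical maxFramebufferWidth of 4096 and minFragmentDensityTexelSize.width
// of 32, the ceiling is ceil(4096 / 32) = 128, so a fragment density map wider than 128 texels is flagged.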
if ((pCreateInfo->usage & VK_IMAGE_USAGE_FRAGMENT_DENSITY_MAP_BIT_EXT) && (pCreateInfo->extent.width > ceiling_width)) {
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkImageCreateInfo-usage-02559",
"vkCreateImage(): Image usage flags include a fragment density map bit and image width (%u) exceeds the "
"ceiling of device "
"maxFramebufferWidth (%u) / minFragmentDensityTexelSize.width (%u). The ceiling value: %u",
pCreateInfo->extent.width, device_limits->maxFramebufferWidth,
phys_dev_ext_props.fragment_density_map_props.minFragmentDensityTexelSize.width, ceiling_width);
}
uint32_t ceiling_height =
(uint32_t)ceil((float)device_limits->maxFramebufferHeight /
std::max((float)phys_dev_ext_props.fragment_density_map_props.minFragmentDensityTexelSize.height, 1.0f));
if ((pCreateInfo->usage & VK_IMAGE_USAGE_FRAGMENT_DENSITY_MAP_BIT_EXT) && (pCreateInfo->extent.height > ceiling_height)) {
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkImageCreateInfo-usage-02560",
"vkCreateImage(): Image usage flags include a fragment density map bit and image height (%u) exceeds the "
"ceiling of device "
"maxFramebufferHeight (%u) / minFragmentDensityTexelSize.height (%u). The ceiling value: %u",
pCreateInfo->extent.height, device_limits->maxFramebufferHeight,
phys_dev_ext_props.fragment_density_map_props.minFragmentDensityTexelSize.height, ceiling_height);
}
}
VkImageFormatProperties format_limits = {};
VkResult result = VK_SUCCESS;
if (pCreateInfo->tiling != VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT) {
result = DispatchGetPhysicalDeviceImageFormatProperties(physical_device, pCreateInfo->format, pCreateInfo->imageType,
pCreateInfo->tiling, pCreateInfo->usage, pCreateInfo->flags,
&format_limits);
} else {
auto image_format_info = lvl_init_struct<VkPhysicalDeviceImageFormatInfo2>();
auto image_format_properties = lvl_init_struct<VkImageFormatProperties2>();
image_format_info.type = pCreateInfo->imageType;
image_format_info.tiling = pCreateInfo->tiling;
image_format_info.usage = pCreateInfo->usage;
image_format_info.flags = pCreateInfo->flags;
result = DispatchGetPhysicalDeviceImageFormatProperties2(physical_device, &image_format_info, &image_format_properties);
format_limits = image_format_properties.imageFormatProperties;
}
if (result == VK_ERROR_FORMAT_NOT_SUPPORTED) {
#ifdef VK_USE_PLATFORM_ANDROID_KHR
if (!lvl_find_in_chain<VkExternalFormatANDROID>(pCreateInfo->pNext))
#endif // VK_USE_PLATFORM_ANDROID_KHR
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, kVUIDUndefined,
"vkCreateImage(): Format %s is not supported for this combination of parameters.",
string_VkFormat(pCreateInfo->format));
} else {
if (pCreateInfo->mipLevels > format_limits.maxMipLevels) {
const char *format_string = string_VkFormat(pCreateInfo->format);
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkImageCreateInfo-mipLevels-02255",
"vkCreateImage(): Image mip levels=%d exceed image format maxMipLevels=%d for format %s.",
pCreateInfo->mipLevels, format_limits.maxMipLevels, format_string);
}
uint64_t texel_count = (uint64_t)pCreateInfo->extent.width * (uint64_t)pCreateInfo->extent.height *
(uint64_t)pCreateInfo->extent.depth * (uint64_t)pCreateInfo->arrayLayers *
(uint64_t)pCreateInfo->samples;
uint64_t total_size = (uint64_t)std::ceil(FormatTexelSize(pCreateInfo->format) * texel_count);
// Round up to imageGranularity boundary
VkDeviceSize imageGranularity = phys_dev_props.limits.bufferImageGranularity;
uint64_t ig_mask = imageGranularity - 1;
total_size = (total_size + ig_mask) & ~ig_mask;
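// For example, with a hypothetical bufferImageGranularity of 0x400, a total_size of 0x1234 rounds up to
// (0x1234 + 0x3FF) & ~0x3FF = 0x1400; the mask arithmetic assumes the granularity is a power of two.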
if (total_size > format_limits.maxResourceSize) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, 0,
kVUID_Core_Image_InvalidFormatLimitsViolation,
"vkCreateImage(): resource size exceeds allowable maximum Image resource size = 0x%" PRIxLEAST64
", maximum resource size = 0x%" PRIxLEAST64 " ",
total_size, format_limits.maxResourceSize);
}
if (pCreateInfo->arrayLayers > format_limits.maxArrayLayers) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, 0,
"VUID-VkImageCreateInfo-arrayLayers-02256",
"vkCreateImage(): arrayLayers=%d exceeds allowable maximum supported by format of %d.",
pCreateInfo->arrayLayers, format_limits.maxArrayLayers);
}
if (device_extensions.vk_khr_sampler_ycbcr_conversion && FormatRequiresYcbcrConversion(pCreateInfo->format) &&
!device_extensions.vk_ext_ycbcr_image_arrays && pCreateInfo->arrayLayers > 1) {
skip |= log_msg(
report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, 0,
"VUID-VkImageCreateInfo-format-02653",
"vkCreateImage(): arrayLayers=%d exceeds the maximum allowed of 1 for formats requiring sampler ycbcr conversion",
pCreateInfo->arrayLayers);
}
if ((pCreateInfo->samples & format_limits.sampleCounts) == 0) {
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, 0,
"VUID-VkImageCreateInfo-samples-02258", "vkCreateImage(): samples %s is not supported by format 0x%.8X.",
string_VkSampleCountFlagBits(pCreateInfo->samples), format_limits.sampleCounts);
}
}
if ((pCreateInfo->flags & VK_IMAGE_CREATE_SPARSE_ALIASED_BIT) && (!enabled_features.core.sparseResidencyAliased)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkImageCreateInfo-flags-01924",
"vkCreateImage(): the sparseResidencyAliased device feature is disabled: Images cannot be created with the "
"VK_IMAGE_CREATE_SPARSE_ALIASED_BIT set.");
}
if (device_extensions.vk_khr_maintenance2) {
if (pCreateInfo->flags & VK_IMAGE_CREATE_BLOCK_TEXEL_VIEW_COMPATIBLE_BIT_KHR) {
if (!(FormatIsCompressed_BC(pCreateInfo->format) || FormatIsCompressed_ASTC_LDR(pCreateInfo->format) ||
FormatIsCompressed_ETC2_EAC(pCreateInfo->format))) {
// TODO: Add Maintenance2 VUID
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, kVUIDUndefined,
"vkCreateImage(): If pCreateInfo->flags contains VK_IMAGE_CREATE_BLOCK_TEXEL_VIEW_COMPATIBLE_BIT_KHR, "
"format must be block, ETC or ASTC compressed, but is %s",
string_VkFormat(pCreateInfo->format));
}
if (!(pCreateInfo->flags & VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT)) {
// TODO: Add Maintenance2 VUID
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, kVUIDUndefined,
"vkCreateImage(): If pCreateInfo->flags contains VK_IMAGE_CREATE_BLOCK_TEXEL_VIEW_COMPATIBLE_BIT_KHR, "
"flags must also contain VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT.");
}
}
}
if (pCreateInfo->sharingMode == VK_SHARING_MODE_CONCURRENT && pCreateInfo->pQueueFamilyIndices) {
skip |= ValidateQueueFamilies(pCreateInfo->queueFamilyIndexCount, pCreateInfo->pQueueFamilyIndices, "vkCreateImage",
"pCreateInfo->pQueueFamilyIndices", "VUID-VkImageCreateInfo-sharingMode-01420",
"VUID-VkImageCreateInfo-sharingMode-01420", false);
}
if (!FormatIsMultiplane(pCreateInfo->format) && !(pCreateInfo->flags & VK_IMAGE_CREATE_ALIAS_BIT) &&
(pCreateInfo->flags & VK_IMAGE_CREATE_DISJOINT_BIT)) {
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkImageCreateInfo-format-01577",
"vkCreateImage(): format is %s and flags are %s. The flags should not include VK_IMAGE_CREATE_DISJOINT_BIT.",
string_VkFormat(pCreateInfo->format), string_VkImageCreateFlags(pCreateInfo->flags).c_str());
}
return skip;
}
void CoreChecks::PostCallRecordCreateImage(VkDevice device, const VkImageCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkImage *pImage, VkResult result) {
if (VK_SUCCESS != result) return;
StateTracker::PostCallRecordCreateImage(device, pCreateInfo, pAllocator, pImage, result);
IMAGE_LAYOUT_STATE image_state;
image_state.layout = pCreateInfo->initialLayout;
image_state.format = pCreateInfo->format;
ImageSubresourcePair subpair{*pImage, false, VkImageSubresource()};
imageSubresourceMap[*pImage].push_back(subpair);
imageLayoutMap[subpair] = image_state;
}
bool CoreChecks::PreCallValidateDestroyImage(VkDevice device, VkImage image, const VkAllocationCallbacks *pAllocator) const {
const IMAGE_STATE *image_state = GetImageState(image);
const VulkanTypedHandle obj_struct(image, kVulkanObjectTypeImage);
bool skip = false;
if (image_state) {
skip |= ValidateObjectNotInUse(image_state, obj_struct, "vkDestroyImage", "VUID-vkDestroyImage-image-01000");
}
return skip;
}
void CoreChecks::PreCallRecordDestroyImage(VkDevice device, VkImage image, const VkAllocationCallbacks *pAllocator) {
// Clean up validation specific data
EraseQFOReleaseBarriers<VkImageMemoryBarrier>(image);
const auto &sub_entry = imageSubresourceMap.find(image);
if (sub_entry != imageSubresourceMap.end()) {
for (const auto &pair : sub_entry->second) {
imageLayoutMap.erase(pair);
}
imageSubresourceMap.erase(sub_entry);
}
// Clean up generic image state
StateTracker::PreCallRecordDestroyImage(device, image, pAllocator);
}
bool CoreChecks::ValidateImageAttributes(const IMAGE_STATE *image_state, const VkImageSubresourceRange &range) const {
bool skip = false;
if (range.aspectMask != VK_IMAGE_ASPECT_COLOR_BIT) {
char const str[] = "vkCmdClearColorImage aspectMasks for all subresource ranges must be set to VK_IMAGE_ASPECT_COLOR_BIT";
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
HandleToUint64(image_state->image), kVUID_Core_DrawState_InvalidImageAspect, str);
}
if (FormatIsDepthOrStencil(image_state->createInfo.format)) {
char const str[] = "vkCmdClearColorImage called with depth/stencil image.";
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
HandleToUint64(image_state->image), "VUID-vkCmdClearColorImage-image-00007", "%s.", str);
} else if (FormatIsCompressed(image_state->createInfo.format)) {
char const str[] = "vkCmdClearColorImage called with compressed image.";
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
HandleToUint64(image_state->image), "VUID-vkCmdClearColorImage-image-00007", "%s.", str);
}
if (!(image_state->createInfo.usage & VK_IMAGE_USAGE_TRANSFER_DST_BIT)) {
char const str[] = "vkCmdClearColorImage called with image created without VK_IMAGE_USAGE_TRANSFER_DST_BIT.";
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
HandleToUint64(image_state->image), "VUID-vkCmdClearColorImage-image-00002", "%s.", str);
}
return skip;
}
uint32_t ResolveRemainingLevels(const VkImageSubresourceRange *range, uint32_t mip_levels) {
// Return correct number of mip levels taking into account VK_REMAINING_MIP_LEVELS
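// For example, for an image with 10 mip levels, a range with baseMipLevel = 3 and
// levelCount = VK_REMAINING_MIP_LEVELS resolves to 7 levels; ResolveRemainingLayers below
// applies the same rule to array layers.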
uint32_t mip_level_count = range->levelCount;
if (range->levelCount == VK_REMAINING_MIP_LEVELS) {
mip_level_count = mip_levels - range->baseMipLevel;
}
return mip_level_count;
}
uint32_t ResolveRemainingLayers(const VkImageSubresourceRange *range, uint32_t layers) {
// Return correct number of layers taking into account VK_REMAINING_ARRAY_LAYERS
uint32_t array_layer_count = range->layerCount;
if (range->layerCount == VK_REMAINING_ARRAY_LAYERS) {
array_layer_count = layers - range->baseArrayLayer;
}
return array_layer_count;
}
bool CoreChecks::VerifyClearImageLayout(const CMD_BUFFER_STATE *cb_node, const IMAGE_STATE *image_state,
const VkImageSubresourceRange &range, VkImageLayout dest_image_layout,
const char *func_name) const {
bool skip = false;
if (dest_image_layout != VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL) {
if (dest_image_layout == VK_IMAGE_LAYOUT_GENERAL) {
if (image_state->createInfo.tiling != VK_IMAGE_TILING_LINEAR) {
// LAYOUT_GENERAL is allowed, but may not be performance optimal, flag as perf warning.
skip |= log_msg(report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
HandleToUint64(image_state->image), kVUID_Core_DrawState_InvalidImageLayout,
"%s: Layout for cleared image should be TRANSFER_DST_OPTIMAL instead of GENERAL.", func_name);
}
} else if (VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR == dest_image_layout) {
if (!device_extensions.vk_khr_shared_presentable_image) {
// TODO: Add unique error id when available.
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
HandleToUint64(image_state->image), 0,
"Must enable VK_KHR_shared_presentable_image extension before creating images with a layout type "
"of VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR.");
} else {
if (image_state->shared_presentable) {
skip |= log_msg(
report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
HandleToUint64(image_state->image), 0,
"Layout for shared presentable cleared image is %s but can only be VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR.",
string_VkImageLayout(dest_image_layout));
}
}
} else {
const char *error_code = "VUID-vkCmdClearColorImage-imageLayout-00005";
if (strcmp(func_name, "vkCmdClearDepthStencilImage()") == 0) {
error_code = "VUID-vkCmdClearDepthStencilImage-imageLayout-00012";
} else {
assert(strcmp(func_name, "vkCmdClearColorImage()") == 0);
}
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
HandleToUint64(image_state->image), error_code,
"%s: Layout for cleared image is %s but can only be TRANSFER_DST_OPTIMAL or GENERAL.", func_name,
string_VkImageLayout(dest_image_layout));
}
}
// Cast to const to prevent creation at validate time.
const auto *subresource_map = GetImageSubresourceLayoutMap(cb_node, image_state->image);
if (subresource_map) {
bool subres_skip = false;
LayoutUseCheckAndMessage layout_check(subresource_map);
VkImageSubresourceRange normalized_isr = NormalizeSubresourceRange(*image_state, range);
auto subres_callback = [this, cb_node, dest_image_layout, func_name, &layout_check, &subres_skip](
const VkImageSubresource &subres, VkImageLayout layout, VkImageLayout initial_layout) {
if (!layout_check.Check(subres, dest_image_layout, layout, initial_layout)) {
const char *error_code = "VUID-vkCmdClearColorImage-imageLayout-00004";
if (strcmp(func_name, "vkCmdClearDepthStencilImage()") == 0) {
error_code = "VUID-vkCmdClearDepthStencilImage-imageLayout-00011";
} else {
assert(strcmp(func_name, "vkCmdClearColorImage()") == 0);
}
subres_skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_node->commandBuffer), error_code,
"%s: Cannot clear an image whose layout is %s and doesn't match the %s layout %s.",
func_name, string_VkImageLayout(dest_image_layout), layout_check.message,
string_VkImageLayout(layout_check.layout));
}
return !subres_skip;
};
subresource_map->ForRange(normalized_isr, subres_callback);
skip |= subres_skip;
}
return skip;
}
bool CoreChecks::PreCallValidateCmdClearColorImage(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout,
const VkClearColorValue *pColor, uint32_t rangeCount,
const VkImageSubresourceRange *pRanges) const {
bool skip = false;
// TODO : Verify memory is in VK_IMAGE_STATE_CLEAR state
const auto *cb_node = GetCBState(commandBuffer);
const auto *image_state = GetImageState(image);
if (cb_node && image_state) {
skip |= ValidateMemoryIsBoundToImage(image_state, "vkCmdClearColorImage()", "VUID-vkCmdClearColorImage-image-00003");
skip |= ValidateCmdQueueFlags(cb_node, "vkCmdClearColorImage()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
"VUID-vkCmdClearColorImage-commandBuffer-cmdpool");
skip |= ValidateCmd(cb_node, CMD_CLEARCOLORIMAGE, "vkCmdClearColorImage()");
if (api_version >= VK_API_VERSION_1_1 || device_extensions.vk_khr_maintenance1) {
skip |=
ValidateImageFormatFeatureFlags(image_state, VK_FORMAT_FEATURE_TRANSFER_DST_BIT, "vkCmdClearColorImage",
"VUID-vkCmdClearColorImage-image-01993", "VUID-vkCmdClearColorImage-image-01993");
}
skip |= InsideRenderPass(cb_node, "vkCmdClearColorImage()", "VUID-vkCmdClearColorImage-renderpass");
for (uint32_t i = 0; i < rangeCount; ++i) {
std::string param_name = "pRanges[" + std::to_string(i) + "]";
skip |= ValidateCmdClearColorSubresourceRange(image_state, pRanges[i], param_name.c_str());
skip |= ValidateImageAttributes(image_state, pRanges[i]);
skip |= VerifyClearImageLayout(cb_node, image_state, pRanges[i], imageLayout, "vkCmdClearColorImage()");
}
}
return skip;
}
void CoreChecks::PreCallRecordCmdClearColorImage(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout,
const VkClearColorValue *pColor, uint32_t rangeCount,
const VkImageSubresourceRange *pRanges) {
StateTracker::PreCallRecordCmdClearColorImage(commandBuffer, image, imageLayout, pColor, rangeCount, pRanges);
auto cb_node = GetCBState(commandBuffer);
auto image_state = GetImageState(image);
if (cb_node && image_state) {
for (uint32_t i = 0; i < rangeCount; ++i) {
SetImageInitialLayout(cb_node, image, pRanges[i], imageLayout);
}
}
}
bool CoreChecks::PreCallValidateCmdClearDepthStencilImage(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout,
const VkClearDepthStencilValue *pDepthStencil, uint32_t rangeCount,
const VkImageSubresourceRange *pRanges) const {
bool skip = false;
// TODO : Verify memory is in VK_IMAGE_STATE_CLEAR state
const auto *cb_node = GetCBState(commandBuffer);
const auto *image_state = GetImageState(image);
if (cb_node && image_state) {
skip |= ValidateMemoryIsBoundToImage(image_state, "vkCmdClearDepthStencilImage()",
"VUID-vkCmdClearDepthStencilImage-image-00010");
skip |= ValidateCmdQueueFlags(cb_node, "vkCmdClearDepthStencilImage()", VK_QUEUE_GRAPHICS_BIT,
"VUID-vkCmdClearDepthStencilImage-commandBuffer-cmdpool");
skip |= ValidateCmd(cb_node, CMD_CLEARDEPTHSTENCILIMAGE, "vkCmdClearDepthStencilImage()");
if (api_version >= VK_API_VERSION_1_1 || device_extensions.vk_khr_maintenance1) {
skip |= ValidateImageFormatFeatureFlags(image_state, VK_FORMAT_FEATURE_TRANSFER_DST_BIT, "vkCmdClearDepthStencilImage",
"VUID-vkCmdClearDepthStencilImage-image-01994",
"VUID-vkCmdClearDepthStencilImage-image-01994");
}
skip |= InsideRenderPass(cb_node, "vkCmdClearDepthStencilImage()", "VUID-vkCmdClearDepthStencilImage-renderpass");
for (uint32_t i = 0; i < rangeCount; ++i) {
std::string param_name = "pRanges[" + std::to_string(i) + "]";
skip |= ValidateCmdClearDepthSubresourceRange(image_state, pRanges[i], param_name.c_str());
skip |= VerifyClearImageLayout(cb_node, image_state, pRanges[i], imageLayout, "vkCmdClearDepthStencilImage()");
// Image aspect must be depth or stencil or both
VkImageAspectFlags valid_aspects = VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT;
if (((pRanges[i].aspectMask & valid_aspects) == 0) || ((pRanges[i].aspectMask & ~valid_aspects) != 0)) {
char const str[] =
"vkCmdClearDepthStencilImage aspectMasks for all subresource ranges must be set to VK_IMAGE_ASPECT_DEPTH_BIT "
"and/or VK_IMAGE_ASPECT_STENCIL_BIT";
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), kVUID_Core_DrawState_InvalidImageAspect, str);
}
}
if (image_state && !FormatIsDepthOrStencil(image_state->createInfo.format)) {
char const str[] = "vkCmdClearDepthStencilImage called without a depth/stencil image.";
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
HandleToUint64(image), "VUID-vkCmdClearDepthStencilImage-image-00014", "%s.", str);
}
if (VK_IMAGE_USAGE_TRANSFER_DST_BIT != (VK_IMAGE_USAGE_TRANSFER_DST_BIT & image_state->createInfo.usage)) {
char const str[] =
"vkCmdClearDepthStencilImage() called with an image that was not created with the VK_IMAGE_USAGE_TRANSFER_DST_BIT "
"set.";
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
HandleToUint64(image), "VUID-vkCmdClearDepthStencilImage-image-00009", "%s.", str);
}
}
return skip;
}
void CoreChecks::PreCallRecordCmdClearDepthStencilImage(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout,
const VkClearDepthStencilValue *pDepthStencil, uint32_t rangeCount,
const VkImageSubresourceRange *pRanges) {
StateTracker::PreCallRecordCmdClearDepthStencilImage(commandBuffer, image, imageLayout, pDepthStencil, rangeCount, pRanges);
auto cb_node = GetCBState(commandBuffer);
auto image_state = GetImageState(image);
if (cb_node && image_state) {
for (uint32_t i = 0; i < rangeCount; ++i) {
SetImageInitialLayout(cb_node, image, pRanges[i], imageLayout);
}
}
}
// Returns true if the half-open ranges [start, start + start_offset) and [end, end + end_offset) overlap
static bool RangesIntersect(int32_t start, uint32_t start_offset, int32_t end, uint32_t end_offset) {
bool result = false;
uint32_t intersection_min = std::max(static_cast<uint32_t>(start), static_cast<uint32_t>(end));
uint32_t intersection_max = std::min(static_cast<uint32_t>(start) + start_offset, static_cast<uint32_t>(end) + end_offset);
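// For example, start = 0 with start_offset = 4 covers [0, 4) and end = 3 with end_offset = 2 covers [3, 5);
// intersection_min = 3 and intersection_max = 4, so the ranges intersect.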
if (intersection_max > intersection_min) {
result = true;
}
return result;
}
// Returns true if source area of first copy region intersects dest area of second region
// It is assumed that these are copy regions within a single image (otherwise no possibility of collision)
static bool RegionIntersects(const VkImageCopy *rgn0, const VkImageCopy *rgn1, VkImageType type, bool is_multiplane) {
bool result = false;
// Separate planes within a multiplane image cannot intersect
if (is_multiplane && (rgn0->srcSubresource.aspectMask != rgn1->dstSubresource.aspectMask)) {
return result;
}
if ((rgn0->srcSubresource.mipLevel == rgn1->dstSubresource.mipLevel) &&
(RangesIntersect(rgn0->srcSubresource.baseArrayLayer, rgn0->srcSubresource.layerCount, rgn1->dstSubresource.baseArrayLayer,
rgn1->dstSubresource.layerCount))) {
result = true;
switch (type) {
case VK_IMAGE_TYPE_3D:
result &= RangesIntersect(rgn0->srcOffset.z, rgn0->extent.depth, rgn1->dstOffset.z, rgn1->extent.depth);
// fall through
case VK_IMAGE_TYPE_2D:
result &= RangesIntersect(rgn0->srcOffset.y, rgn0->extent.height, rgn1->dstOffset.y, rgn1->extent.height);
// fall through
case VK_IMAGE_TYPE_1D:
result &= RangesIntersect(rgn0->srcOffset.x, rgn0->extent.width, rgn1->dstOffset.x, rgn1->extent.width);
break;
default:
// Unrecognized or new IMAGE_TYPE enums will be caught in parameter_validation
assert(false);
}
}
return result;
}
// Returns non-zero if offset and extent exceed image extents
static const uint32_t x_bit = 1;
static const uint32_t y_bit = 2;
static const uint32_t z_bit = 4;
static uint32_t ExceedsBounds(const VkOffset3D *offset, const VkExtent3D *extent, const VkExtent3D *image_extent) {
uint32_t result = 0;
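// For example, an offset of {6, 0, 0} with an extent of {4, 1, 1} against an image extent of {8, 8, 1}
// exceeds only the width (6 + 4 > 8), so the returned mask is x_bit.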
// Extents/depths cannot be negative but checks left in for clarity
if ((offset->z + extent->depth > image_extent->depth) || (offset->z < 0) ||
((offset->z + static_cast<int32_t>(extent->depth)) < 0)) {
result |= z_bit;
}
if ((offset->y + extent->height > image_extent->height) || (offset->y < 0) ||
((offset->y + static_cast<int32_t>(extent->height)) < 0)) {
result |= y_bit;
}
if ((offset->x + extent->width > image_extent->width) || (offset->x < 0) ||
((offset->x + static_cast<int32_t>(extent->width)) < 0)) {
result |= x_bit;
}
return result;
}
// Test if two VkExtent3D structs are equivalent
static inline bool IsExtentEqual(const VkExtent3D *extent, const VkExtent3D *other_extent) {
bool result = true;
if ((extent->width != other_extent->width) || (extent->height != other_extent->height) ||
(extent->depth != other_extent->depth)) {
result = false;
}
return result;
}
// For image copies between compressed/uncompressed formats, the extent is provided in source image texels
// Destination image texel extents must be adjusted by block size for the dest validation checks
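// For example, copying from a BC1-compressed source (4x4 texel blocks) into an uncompressed destination
// with an extent of {64, 64, 1} in source texels yields an adjusted destination extent of {16, 16, 1};
// the reverse direction multiplies by the block size instead.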
VkExtent3D GetAdjustedDestImageExtent(VkFormat src_format, VkFormat dst_format, VkExtent3D extent) {
VkExtent3D adjusted_extent = extent;
if ((FormatIsCompressed(src_format) || FormatIsSinglePlane_422(src_format)) &&
!(FormatIsCompressed(dst_format) || FormatIsSinglePlane_422(dst_format))) {
VkExtent3D block_size = FormatTexelBlockExtent(src_format);
adjusted_extent.width /= block_size.width;
adjusted_extent.height /= block_size.height;
adjusted_extent.depth /= block_size.depth;
} else if (!(FormatIsCompressed(src_format) || FormatIsSinglePlane_422(src_format)) &&
(FormatIsCompressed(dst_format) || FormatIsSinglePlane_422(dst_format))) {
VkExtent3D block_size = FormatTexelBlockExtent(dst_format);
adjusted_extent.width *= block_size.width;
adjusted_extent.height *= block_size.height;
adjusted_extent.depth *= block_size.depth;
}
return adjusted_extent;
}
// Returns the effective extent of an image subresource, adjusted for mip level and array depth.
static inline VkExtent3D GetImageSubresourceExtent(const IMAGE_STATE *img, const VkImageSubresourceLayers *subresource) {
const uint32_t mip = subresource->mipLevel;
// Return zero extent if mip level doesn't exist
if (mip >= img->createInfo.mipLevels) {
return VkExtent3D{0, 0, 0};
}
// Don't allow mip adjustment to create 0 dim, but pass along a 0 if that's what subresource specified
VkExtent3D extent = img->createInfo.extent;
// If multi-plane, adjust per-plane extent
if (FormatIsMultiplane(img->createInfo.format)) {
VkExtent2D divisors = FindMultiplaneExtentDivisors(img->createInfo.format, subresource->aspectMask);
extent.width /= divisors.width;
extent.height /= divisors.height;
}
if (img->createInfo.flags & VK_IMAGE_CREATE_CORNER_SAMPLED_BIT_NV) {
extent.width = (0 == extent.width ? 0 : std::max(2U, 1 + ((extent.width - 1) >> mip)));
extent.height = (0 == extent.height ? 0 : std::max(2U, 1 + ((extent.height - 1) >> mip)));
extent.depth = (0 == extent.depth ? 0 : std::max(2U, 1 + ((extent.depth - 1) >> mip)));
} else {
extent.width = (0 == extent.width ? 0 : std::max(1U, extent.width >> mip));
extent.height = (0 == extent.height ? 0 : std::max(1U, extent.height >> mip));
extent.depth = (0 == extent.depth ? 0 : std::max(1U, extent.depth >> mip));
}
// Image arrays have an effective z extent that isn't diminished by mip level
if (VK_IMAGE_TYPE_3D != img->createInfo.imageType) {
extent.depth = img->createInfo.arrayLayers;
}
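// For example, a 2D array image of 100x100 with 5 array layers queried at mipLevel 2 yields an
// effective extent of {25, 25, 5}: width and height are shifted right by the mip level and depth
// reports the layer count.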
return extent;
}
// Test if the extent argument has all dimensions set to 0.
static inline bool IsExtentAllZeroes(const VkExtent3D *extent) {
return ((extent->width == 0) && (extent->height == 0) && (extent->depth == 0));
}
// Test if the extent argument has any dimensions set to 0.
static inline bool IsExtentSizeZero(const VkExtent3D *extent) {
return ((extent->width == 0) || (extent->height == 0) || (extent->depth == 0));
}
// Returns the image transfer granularity for a specific image scaled by compressed block size if necessary.
VkExtent3D CoreChecks::GetScaledItg(const CMD_BUFFER_STATE *cb_node, const IMAGE_STATE *img) const {
// Default to (0, 0, 0) granularity in case we can't find the real granularity for the physical device.
VkExtent3D granularity = {0, 0, 0};
auto pPool = cb_node->command_pool.get();
if (pPool) {
granularity = GetPhysicalDeviceState()->queue_family_properties[pPool->queueFamilyIndex].minImageTransferGranularity;
if (FormatIsCompressed(img->createInfo.format) || FormatIsSinglePlane_422(img->createInfo.format)) {
auto block_size = FormatTexelBlockExtent(img->createInfo.format);
granularity.width *= block_size.width;
granularity.height *= block_size.height;
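// For example, a hypothetical queue family granularity of {4, 4, 1} combined with a BC1 format
// (4x4x1 texel blocks) scales to an effective granularity of {16, 16, 1}.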
}
}
return granularity;
}
// Test elements of a VkExtent3D structure against alignment constraints contained in another VkExtent3D structure
static inline bool IsExtentAligned(const VkExtent3D *extent, const VkExtent3D *granularity) {
bool valid = true;
if ((SafeModulo(extent->depth, granularity->depth) != 0) || (SafeModulo(extent->width, granularity->width) != 0) ||
(SafeModulo(extent->height, granularity->height) != 0)) {
valid = false;
}
return valid;
}
// Check elements of a VkOffset3D structure against a queue family's Image Transfer Granularity values
bool CoreChecks::CheckItgOffset(const CMD_BUFFER_STATE *cb_node, const VkOffset3D *offset, const VkExtent3D *granularity,
const uint32_t i, const char *function, const char *member, const char *vuid) const {
bool skip = false;
VkExtent3D offset_extent = {};
offset_extent.width = static_cast<uint32_t>(abs(offset->x));
offset_extent.height = static_cast<uint32_t>(abs(offset->y));
offset_extent.depth = static_cast<uint32_t>(abs(offset->z));
if (IsExtentAllZeroes(granularity)) {
// If the queue family image transfer granularity is (0, 0, 0), then the offset must always be (0, 0, 0)
if (IsExtentAllZeroes(&offset_extent) == false) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_node->commandBuffer), vuid,
"%s: pRegion[%d].%s (x=%d, y=%d, z=%d) must be (x=0, y=0, z=0) when the command buffer's queue family "
"image transfer granularity is (w=0, h=0, d=0).",
function, i, member, offset->x, offset->y, offset->z);
}
} else {
// If the queue family image transfer granularity is not (0, 0, 0), then the offset dimensions must always be even
// integer multiples of the image transfer granularity.
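// For example, with a granularity of {8, 8, 1}, an offset of {16, 24, 0} is accepted while {16, 20, 0}
// is rejected because 20 is not a multiple of 8.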
if (IsExtentAligned(&offset_extent, granularity) == false) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_node->commandBuffer), vuid,
"%s: pRegion[%d].%s (x=%d, y=%d, z=%d) dimensions must be even integer multiples of this command "
"buffer's queue family image transfer granularity (w=%d, h=%d, d=%d).",
function, i, member, offset->x, offset->y, offset->z, granularity->width, granularity->height,
granularity->depth);
}
}
return skip;
}
// Check elements of a VkExtent3D structure against a queue family's Image Transfer Granularity values
bool CoreChecks::CheckItgExtent(const CMD_BUFFER_STATE *cb_node, const VkExtent3D *extent, const VkOffset3D *offset,
const VkExtent3D *granularity, const VkExtent3D *subresource_extent, const VkImageType image_type,
const uint32_t i, const char *function, const char *member, const char *vuid) const {
bool skip = false;
if (IsExtentAllZeroes(granularity)) {
// If the queue family image transfer granularity is (0, 0, 0), then the extent must always match the image
// subresource extent.
if (IsExtentEqual(extent, subresource_extent) == false) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_node->commandBuffer), vuid,
"%s: pRegion[%d].%s (w=%d, h=%d, d=%d) must match the image subresource extents (w=%d, h=%d, d=%d) "
"when the command buffer's queue family image transfer granularity is (w=0, h=0, d=0).",
function, i, member, extent->width, extent->height, extent->depth, subresource_extent->width,
subresource_extent->height, subresource_extent->depth);
}
} else {
// If the queue family image transfer granularity is not (0, 0, 0), then the extent dimensions must always be even
// integer multiples of the image transfer granularity or the offset + extent dimensions must always match the image
// subresource extent dimensions.
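// For example, with a granularity of {8, 8, 1} and a 64x64 subresource, an extent of {24, 13, 1} at
// offset {0, 51, 0} passes: 24 is a multiple of 8, and although 13 is not, offset.y + extent.height
// (51 + 13 = 64) matches the subresource height.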
VkExtent3D offset_extent_sum = {};
offset_extent_sum.width = static_cast<uint32_t>(abs(offset->x)) + extent->width;
offset_extent_sum.height = static_cast<uint32_t>(abs(offset->y)) + extent->height;
offset_extent_sum.depth = static_cast<uint32_t>(abs(offset->z)) + extent->depth;
bool x_ok = true;
bool y_ok = true;
bool z_ok = true;
switch (image_type) {
case VK_IMAGE_TYPE_3D:
z_ok = ((0 == SafeModulo(extent->depth, granularity->depth)) ||
(subresource_extent->depth == offset_extent_sum.depth));
// fall through
case VK_IMAGE_TYPE_2D:
y_ok = ((0 == SafeModulo(extent->height, granularity->height)) ||
(subresource_extent->height == offset_extent_sum.height));
// fall through
case VK_IMAGE_TYPE_1D:
x_ok = ((0 == SafeModulo(extent->width, granularity->width)) ||
(subresource_extent->width == offset_extent_sum.width));
break;
default:
// Unrecognized or new IMAGE_TYPE enums will be caught in parameter_validation
assert(false);
}
if (!(x_ok && y_ok && z_ok)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_node->commandBuffer), vuid,
"%s: pRegion[%d].%s (w=%d, h=%d, d=%d) dimensions must be even integer multiples of this command "
"buffer's queue family image transfer granularity (w=%d, h=%d, d=%d) or offset (x=%d, y=%d, z=%d) + "
"extent (w=%d, h=%d, d=%d) must match the image subresource extents (w=%d, h=%d, d=%d).",
function, i, member, extent->width, extent->height, extent->depth, granularity->width,
granularity->height, granularity->depth, offset->x, offset->y, offset->z, extent->width, extent->height,
extent->depth, subresource_extent->width, subresource_extent->height, subresource_extent->depth);
}
}
return skip;
}
bool CoreChecks::ValidateImageMipLevel(const CMD_BUFFER_STATE *cb_node, const IMAGE_STATE *img, uint32_t mip_level,
const uint32_t i, const char *function, const char *member, const char *vuid) const {