Replaced spaces with tabs (Q3 code style).

Artem Kharytoniuk 2017-05-27 23:36:45 +03:00
parent 077f519ac6
commit 6079758830
8 changed files with 1533 additions and 1535 deletions
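The change is mechanical: runs of leading spaces become tabs. A minimal sketch of the conversion, assuming one tab per 4-space indent level (the indent width is an assumption; the commit does not state it):

// Hypothetical helper, not part of this commit: convert leading
// 4-space groups to tabs, leaving the rest of the line untouched.
#include <string>

std::string spaces_to_tabs(const std::string& line, int tab_width = 4) {
	size_t i = 0;
	std::string out;
	while (i + tab_width <= line.size() &&
	       line.compare(i, tab_width, std::string(tab_width, ' ')) == 0) {
		out += '\t';
		i += tab_width;
	}
	return out + line.substr(i);
}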

@@ -63,11 +63,11 @@ void GL_Bind( image_t *image ) {
glState.currenttextures[glState.currenttmu] = texnum;
qglBindTexture (GL_TEXTURE_2D, texnum);
// VULKAN
if (vk.active) {
VkDescriptorSet set = vk_resources.images[final_image->index].descriptor_set;
vk_resources.current_descriptor_sets[glState.currenttmu] = set;
}
}
}
@@ -710,26 +710,26 @@ void RE_UploadCinematic (int w, int h, int cols, int rows, const byte *data, int
qglTexParameterf( GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP );
qglTexParameterf( GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP );
// VULKAN
if (vk.active) {
Vk_Image& image = vk_resources.images[tr.scratchImage[client]->index];
vkDestroyImage(vk.device, image.handle, nullptr);
vkDestroyImageView(vk.device, image.view, nullptr);
vkFreeDescriptorSets(vk.device, vk.descriptor_pool, 1, &image.descriptor_set);
image = vk_create_image(cols, rows, VK_FORMAT_R8G8B8A8_UNORM, 1, false);
vk_upload_image_data(image.handle, cols, rows, false, data, 4);
}
} else {
if (dirty) {
// otherwise, just subimage upload it so that drivers can tell we are going to be changing
// it and don't try and do a texture compression
qglTexSubImage2D( GL_TEXTURE_2D, 0, 0, 0, cols, rows, GL_RGBA, GL_UNSIGNED_BYTE, data );
// VULKAN
if (vk.active) {
const Vk_Image& image = vk_resources.images[tr.scratchImage[client]->index];
vk_upload_image_data(image.handle, cols, rows, false, data, 4);
}
}
}
}
@@ -1045,8 +1045,8 @@ const void *RB_SwapBuffers( const void *data ) {
backEnd.projection2D = qfalse;
// VULKAN
vk_end_frame();
return (const void *)(cmd + 1);
}

@@ -144,17 +144,17 @@ void GL_TextureMode( const char *string ) {
}
}
// VULKAN
if (vk.active) {
VK_CHECK(vkDeviceWaitIdle(vk.device));
for ( i = 0 ; i < tr.numImages ; i++ ) {
image_t* glt = tr.images[i];
if (glt->mipmap) {
Vk_Image& image = vk_resources.images[i];
vk_update_descriptor_set(image.descriptor_set, image.view, true, glt->wrapClampMode == GL_REPEAT);
}
}
}
}
/*

@@ -318,10 +318,10 @@ typedef struct {
qboolean isDetail;
// VULKAN
VkPipeline vk_pipeline = VK_NULL_HANDLE;
VkPipeline vk_portal_pipeline = VK_NULL_HANDLE;
VkPipeline vk_mirror_pipeline = VK_NULL_HANDLE;
} shaderStage_t;
struct shaderCommands_s;
@@ -929,16 +929,16 @@ extern glconfig_t glConfig; // outside of TR since it shouldn't be cleared duri
extern glstate_t glState; // outside of TR since it shouldn't be cleared during ref re-init
// VULKAN
extern Vk_Instance vk; // shouldn't be cleared during ref re-init
extern Vk_Resources vk_resources; // this data is cleared during ref re-init
//
// cvars
//
extern cvar_t *r_renderAPI; // 3D API to use: 0 - OpenGL, 1 - Vulkan.
extern cvar_t *r_twinMode; // If enabled, renderer creates two separate windows.
// The first window uses rendering API specified by r_renderAPI,
// the second window uses rendering API corresponding to (1 - r_renderAPI).
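For context, a minimal sketch of how Q3-style renderers register such cvars through the import table; the default values and flags below are assumptions, not this repository's actual init code:

// Hypothetical registration sketch; defaults/flags are assumptions.
r_renderAPI = ri.Cvar_Get( "r_renderAPI", "0", CVAR_ARCHIVE | CVAR_LATCH );
r_twinMode = ri.Cvar_Get( "r_twinMode", "0", CVAR_ARCHIVE | CVAR_LATCH );
if ( r_renderAPI->integer == 1 ) {
	// take the Vulkan path
}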

@@ -393,11 +393,11 @@ static void ProjectDlightTexture( void ) {
backEnd.pc.c_totalIndexes += numIndexes;
backEnd.pc.c_dlightIndexes += numIndexes;
// VULKAN
if (vk.active) {
VkPipeline pipeline = vk.dlight_pipelines[dl->additive > 0 ? 1 : 0][tess.shader->cullType][tess.shader->polygonOffset];
vk_shade_geometry(pipeline, false, Vk_Depth_Range::normal);
}
}
}
@@ -437,12 +437,12 @@ static void RB_FogPass( void ) {
R_DrawElements( tess.numIndexes, tess.indexes );
// VULKAN
if (vk.active) {
assert(tess.shader->fogPass > 0);
VkPipeline pipeline = vk.fog_pipelines[tess.shader->fogPass - 1][tess.shader->cullType][tess.shader->polygonOffset];
vk_shade_geometry(pipeline, false, Vk_Depth_Range::normal);
}
}
/*
@@ -745,8 +745,8 @@ static void ComputeTexCoords( shaderStage_t *pStage ) {
*/
static void RB_IterateStagesGeneric( shaderCommands_t *input )
{
// VULKAN
vk_bind_geometry();
for ( int stage = 0; stage < MAX_SHADER_STAGES; stage++ )
{
@@ -794,23 +794,21 @@ static void RB_IterateStagesGeneric( shaderCommands_t *input )
R_DrawElements( input->numIndexes, input->indexes );
}
// VULKAN
if (vk.active) {
VkPipeline pipeline = pStage->vk_pipeline;
if (backEnd.viewParms.isMirror)
pipeline = pStage->vk_mirror_pipeline;
else if (backEnd.viewParms.isPortal)
pipeline = pStage->vk_portal_pipeline;
Vk_Depth_Range depth_range = Vk_Depth_Range::normal;
if (input->shader->isSky) {
depth_range = Vk_Depth_Range::force_one;
if (r_showsky->integer)
depth_range = Vk_Depth_Range::force_zero;
} else if (backEnd.currentEntity->e.renderfx & RF_DEPTHHACK) {
depth_range = Vk_Depth_Range::weapon;
} else {
depth_range = Vk_Depth_Range::normal;
}
if (r_lightmap->integer && multitexture)

@@ -2160,38 +2160,38 @@ static shader_t *FinishShader( void ) {
shader.sort = SS_FOG;
}
// VULKAN: create pipelines for each shader stage
if (vk.active) {
Vk_Pipeline_Def def;
def.face_culling = shader.cullType;
def.polygon_offset = (shader.polygonOffset == qtrue);
for (int i = 0; i < stage; i++) {
shaderStage_t *pStage = &stages[i];
def.state_bits = pStage->stateBits;
if (pStage->bundle[1].image[0] == nullptr)
def.shader_type = Vk_Shader_Type::single_texture;
else if (shader.multitextureEnv == GL_MODULATE)
def.shader_type = Vk_Shader_Type::multi_texture_mul;
else if (shader.multitextureEnv == GL_ADD)
def.shader_type = Vk_Shader_Type::multi_texture_add;
else
ri.Error(ERR_FATAL, "Vulkan: could not create pipelines for q3 shader '%s'\n", shader.name);
def.clipping_plane = false;
def.mirror = false;
pStage->vk_pipeline = vk_find_pipeline(def);
def.clipping_plane = true;
def.mirror = false;
pStage->vk_portal_pipeline = vk_find_pipeline(def);
def.clipping_plane = true;
def.mirror = true;
pStage->vk_mirror_pipeline = vk_find_pipeline(def);
}
}
return GeneratePermanentShader();
}

@@ -450,53 +450,53 @@ static void DrawSkyBox( shader_t *shader )
sky_mins_subd,
sky_maxs_subd );
// VULKAN: draw skybox side
if (vk.active) {
GL_Bind(shader->sky.outerbox[sky_texorder[i]]);
Com_Memset( tess.svars.colors, tr.identityLightByte, tess.numVertexes * 4 );
tess.numVertexes = 0;
tess.numIndexes = 0;
for ( t = sky_mins_subd[1]+HALF_SKY_SUBDIVISIONS; t < sky_maxs_subd[1]+HALF_SKY_SUBDIVISIONS; t++ )
{
for ( s = sky_mins_subd[0]+HALF_SKY_SUBDIVISIONS; s < sky_maxs_subd[0]+HALF_SKY_SUBDIVISIONS; s++ )
{
int ndx = tess.numVertexes;
tess.indexes[ tess.numIndexes ] = ndx;
tess.indexes[ tess.numIndexes + 1 ] = ndx + 1;
tess.indexes[ tess.numIndexes + 2 ] = ndx + 2;
tess.indexes[ tess.numIndexes + 3 ] = ndx + 2;
tess.indexes[ tess.numIndexes + 4 ] = ndx + 1;
tess.indexes[ tess.numIndexes + 5 ] = ndx + 3;
tess.numIndexes += 6;
VectorCopy(s_skyPoints[t][s], tess.xyz[ndx]);
tess.svars.texcoords[0][ndx][0] = s_skyTexCoords[t][s][0];
tess.svars.texcoords[0][ndx][1] = s_skyTexCoords[t][s][1];
VectorCopy(s_skyPoints[t + 1][s], tess.xyz[ndx + 1]);
tess.svars.texcoords[0][ndx + 1][0] = s_skyTexCoords[t + 1][s][0];
tess.svars.texcoords[0][ndx + 1][1] = s_skyTexCoords[t + 1][s][1];
VectorCopy(s_skyPoints[t][s + 1], tess.xyz[ndx + 2]);
tess.svars.texcoords[0][ndx + 2][0] = s_skyTexCoords[t][s + 1][0];
tess.svars.texcoords[0][ndx + 2][1] = s_skyTexCoords[t][s + 1][1];
VectorCopy(s_skyPoints[t + 1][s + 1], tess.xyz[ndx + 3]);
tess.svars.texcoords[0][ndx + 3][0] = s_skyTexCoords[t + 1][s + 1][0];
tess.svars.texcoords[0][ndx + 3][1] = s_skyTexCoords[t + 1][s + 1][1];
tess.numVertexes += 4;
}
}
vk_bind_geometry();
vk_shade_geometry(vk.skybox_pipeline, false, r_showsky->integer ? Vk_Depth_Range::force_zero : Vk_Depth_Range::force_one);
}
}
}

File diff suppressed because it is too large.

@@ -17,15 +17,15 @@ const int IMAGE_CHUNK_SIZE = 32 * 1024 * 1024;
const int MAX_IMAGE_CHUNKS = 16;
#define VK_CHECK(function_call) { \
VkResult result = function_call; \
if (result < 0) \
ri.Error(ERR_FATAL, "Vulkan error: function %s, error code %d", #function_call, result); \
}
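Any call returning VkResult can be wrapped by the macro above, as the hunks earlier in this commit already do; for example (the submit-info setup is elided):

// Example use of VK_CHECK; submit_info construction is elided here.
VK_CHECK( vkDeviceWaitIdle( vk.device ) );
VK_CHECK( vkQueueSubmit( vk.queue, 1, &submit_info, vk.rendering_finished_fence ) );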
enum class Vk_Shader_Type {
single_texture,
multi_texture_mul,
multi_texture_add
};
// used with cg_shadows == 2
@@ -43,29 +43,29 @@ enum class Vk_Depth_Range {
};
struct Vk_Sampler_Def {
bool repeat_texture = false; // clamp/repeat texture addressing mode
int gl_mag_filter = 0; // GL_XXX mag filter
int gl_min_filter = 0; // GL_XXX min filter
};
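This struct caches a GL-style filter request so a matching VkSampler can be created lazily. A minimal translation sketch, simplified and hypothetical (the mapping of GL enums to Vulkan filters below is an assumption about intent, not this project's actual helper):

// Hypothetical sketch: translate a Vk_Sampler_Def into a VkSampler.
VkSampler create_sampler(const Vk_Sampler_Def& def) {
	VkSamplerCreateInfo desc = {};
	desc.sType = VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO;
	desc.magFilter = (def.gl_mag_filter == GL_NEAREST) ? VK_FILTER_NEAREST : VK_FILTER_LINEAR;
	desc.minFilter = (def.gl_min_filter == GL_NEAREST) ? VK_FILTER_NEAREST : VK_FILTER_LINEAR;
	desc.mipmapMode = (def.gl_min_filter == GL_LINEAR_MIPMAP_LINEAR)
		? VK_SAMPLER_MIPMAP_MODE_LINEAR : VK_SAMPLER_MIPMAP_MODE_NEAREST;
	desc.addressModeU = desc.addressModeV = desc.addressModeW =
		def.repeat_texture ? VK_SAMPLER_ADDRESS_MODE_REPEAT : VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE;
	desc.maxLod = VK_LOD_CLAMP_NONE;
	VkSampler sampler = VK_NULL_HANDLE;
	VK_CHECK(vkCreateSampler(vk.device, &desc, nullptr, &sampler));
	return sampler;
}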
struct Vk_Pipeline_Def {
Vk_Shader_Type shader_type = Vk_Shader_Type::single_texture;
unsigned int state_bits = 0; // GLS_XXX flags
int face_culling = 0; // cullType_t
bool polygon_offset = false;
bool clipping_plane = false;
bool mirror = false;
bool line_primitives = false;
Vk_Shadow_Phase shadow_phase = Vk_Shadow_Phase::disabled;
};
struct Vk_Image {
VkImage handle = VK_NULL_HANDLE;
VkImageView view = VK_NULL_HANDLE;
// Descriptor set that contains single descriptor used to access the given image.
// It is updated only once during image initialization.
VkDescriptorSet descriptor_set = VK_NULL_HANDLE;
};
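The one-time update mentioned in the comment above is a single vkUpdateDescriptorSets call; a minimal sketch, assuming a combined image sampler at binding 0 (the binding index and the sampler choice are assumptions):

// Hypothetical sketch of the one-time descriptor update for a Vk_Image.
VkDescriptorImageInfo image_info = {};
image_info.sampler = sampler; // picked from vk_resources.samplers
image_info.imageView = image.view;
image_info.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;

VkWriteDescriptorSet write = {};
write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
write.dstSet = image.descriptor_set;
write.dstBinding = 0;
write.descriptorCount = 1;
write.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
write.pImageInfo = &image_info;

vkUpdateDescriptorSets(vk.device, 1, &write, 0, nullptr);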
//
@@ -106,77 +106,77 @@ void vk_read_pixels(byte* buffer);
// Vulkan specific structures used by the engine.
struct Vk_Instance {
bool active = false;
VkInstance instance = VK_NULL_HANDLE;
VkPhysicalDevice physical_device = VK_NULL_HANDLE;
VkSurfaceKHR surface = VK_NULL_HANDLE;
VkSurfaceFormatKHR surface_format = {};
uint32_t queue_family_index = 0;
VkDevice device = VK_NULL_HANDLE;
VkQueue queue = VK_NULL_HANDLE;
VkSwapchainKHR swapchain = VK_NULL_HANDLE;
uint32_t swapchain_image_count = 0;
VkImage swapchain_images[MAX_SWAPCHAIN_IMAGES];
VkImageView swapchain_image_views[MAX_SWAPCHAIN_IMAGES];
VkCommandPool command_pool = VK_NULL_HANDLE;
VkCommandBuffer command_buffer = VK_NULL_HANDLE;
VkImage depth_image = VK_NULL_HANDLE;
VkDeviceMemory depth_image_memory = VK_NULL_HANDLE;
VkImageView depth_image_view = VK_NULL_HANDLE;
VkRenderPass render_pass = VK_NULL_HANDLE;
VkFramebuffer framebuffers[MAX_SWAPCHAIN_IMAGES];
VkDescriptorPool descriptor_pool = VK_NULL_HANDLE;
VkDescriptorSetLayout set_layout = VK_NULL_HANDLE;
VkPipelineLayout pipeline_layout = VK_NULL_HANDLE;
VkBuffer vertex_buffer = VK_NULL_HANDLE;
byte* vertex_buffer_ptr = nullptr; // pointer to mapped vertex buffer
int xyz_elements = 0;
int color_st_elements = 0;
VkBuffer index_buffer = VK_NULL_HANDLE;
byte* index_buffer_ptr = nullptr; // pointer to mapped index buffer
VkDeviceSize index_buffer_offset = 0;
// host visible memory that holds both vertex and index data
VkDeviceMemory geometry_buffer_memory = VK_NULL_HANDLE;
VkSemaphore image_acquired = VK_NULL_HANDLE;
uint32_t swapchain_image_index = -1;
VkSemaphore rendering_finished = VK_NULL_HANDLE;
VkFence rendering_finished_fence = VK_NULL_HANDLE;
VkShaderModule single_texture_vs = VK_NULL_HANDLE;
VkShaderModule single_texture_clipping_plane_vs = VK_NULL_HANDLE;
VkShaderModule single_texture_fs = VK_NULL_HANDLE;
VkShaderModule multi_texture_vs = VK_NULL_HANDLE;
VkShaderModule multi_texture_clipping_plane_vs = VK_NULL_HANDLE;
VkShaderModule multi_texture_mul_fs = VK_NULL_HANDLE;
VkShaderModule multi_texture_add_fs = VK_NULL_HANDLE;
VkPipeline skybox_pipeline = VK_NULL_HANDLE;
// dim 0: 0 - front side, 1 - back side
// dim 1: 0 - normal view, 1 - mirror view
VkPipeline shadow_volume_pipelines[2][2];
VkPipeline shadow_finish_pipeline;
// dim 0 is based on fogPass_t: 0 - corresponds to FP_EQUAL, 1 - corresponds to FP_LE.
// dim 1 is directly a cullType_t enum value.
// dim 2 is a polygon offset value (0 - off, 1 - on).
VkPipeline fog_pipelines[2][3][2];
// dim 0 is based on dlight additive flag: 0 - not additive, 1 - additive
// dim 1 is directly a cullType_t enum value.
// dim 2 is a polygon offset value (0 - off, 1 - on).
VkPipeline dlight_pipelines[2][3][2];
VkPipeline tris_debug_pipeline;
VkPipeline tris_mirror_debug_pipeline;
@@ -187,48 +187,48 @@ struct Vk_Instance {
};
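The image_acquired/rendering_finished semaphores and the fence above imply the usual per-frame ordering. A minimal sketch of the frame-boundary flow they support (an assumption about how vk_begin_frame/vk_end_frame use these members, with command-buffer recording elided, not a copy of the project's code):

// Hypothetical frame-boundary sketch built from the members above.
VK_CHECK(vkAcquireNextImageKHR(vk.device, vk.swapchain, UINT64_MAX,
	vk.image_acquired, VK_NULL_HANDLE, &vk.swapchain_image_index));

// ... record vk.command_buffer, then submit, signaling rendering_finished ...
VkPipelineStageFlags wait_stage = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
VkSubmitInfo submit = {};
submit.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submit.waitSemaphoreCount = 1;
submit.pWaitSemaphores = &vk.image_acquired;
submit.pWaitDstStageMask = &wait_stage;
submit.commandBufferCount = 1;
submit.pCommandBuffers = &vk.command_buffer;
submit.signalSemaphoreCount = 1;
submit.pSignalSemaphores = &vk.rendering_finished;
VK_CHECK(vkQueueSubmit(vk.queue, 1, &submit, vk.rendering_finished_fence));

VkPresentInfoKHR present = {};
present.sType = VK_STRUCTURE_TYPE_PRESENT_INFO_KHR;
present.waitSemaphoreCount = 1;
present.pWaitSemaphores = &vk.rendering_finished;
present.swapchainCount = 1;
present.pSwapchains = &vk.swapchain;
present.pImageIndices = &vk.swapchain_image_index;
VK_CHECK(vkQueuePresentKHR(vk.queue, &present));

// Before reusing the command buffer next frame, wait on the fence.
VK_CHECK(vkWaitForFences(vk.device, 1, &vk.rendering_finished_fence, VK_TRUE, UINT64_MAX));
VK_CHECK(vkResetFences(vk.device, 1, &vk.rendering_finished_fence));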
struct Vk_Resources {
//
// Resources.
//
int num_samplers = 0;
Vk_Sampler_Def sampler_defs[MAX_VK_SAMPLERS];
VkSampler samplers[MAX_VK_SAMPLERS];
int num_pipelines = 0;
Vk_Pipeline_Def pipeline_defs[MAX_VK_PIPELINES];
VkPipeline pipelines[MAX_VK_PIPELINES];
float pipeline_create_time;
Vk_Image images[MAX_VK_IMAGES];
//
// Memory allocations.
//
struct Chunk {
VkDeviceMemory memory = VK_NULL_HANDLE;
VkDeviceSize used = 0;
};
int num_image_chunks = 0;
Chunk image_chunks[MAX_IMAGE_CHUNKS];
// Host visible memory used to copy image data to device local memory.
VkBuffer staging_buffer = VK_NULL_HANDLE;
VkDeviceMemory staging_buffer_memory = VK_NULL_HANDLE;
VkDeviceSize staging_buffer_size = 0;
byte* staging_buffer_ptr = nullptr; // pointer to mapped staging buffer
//
// State.
//
// Descriptor sets corresponding to bound texture images.
VkDescriptorSet current_descriptor_sets[2];
// This flag is used to decide whether framebuffer's attachments should be cleared
// with vkCmdClearAttachments (dirty_attachments == true), or whether they have just been
// cleared by the render pass instance's clear op (dirty_attachments == false).
// See the sketches after this struct.
bool dirty_attachments;
float modelview_transform[16];
};
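The staging members above feed image uploads such as the vk_upload_image_data calls seen in RE_UploadCinematic earlier in this commit. A minimal sketch of the classic staging path (pipeline barriers and layout transitions elided; width, height, data, and command_buffer are stand-ins, and the body is an assumption, not the project's implementation):

// Hypothetical sketch of a staging-buffer upload (transitions elided).
Com_Memcpy(vk_resources.staging_buffer_ptr, data, width * height * 4);

VkBufferImageCopy region = {};
region.imageSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
region.imageSubresource.layerCount = 1;
region.imageExtent = { (uint32_t)width, (uint32_t)height, 1 };

vkCmdCopyBufferToImage(command_buffer, vk_resources.staging_buffer, image.handle,
	VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &region);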
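Likewise, the dirty_attachments flag gates a mid-render-pass clear via vkCmdClearAttachments. A minimal sketch of that path (the clear values and the render area taken from glConfig are assumptions):

// Hypothetical sketch of the dirty_attachments clear path.
if (vk_resources.dirty_attachments) {
	VkClearAttachment attachments[2] = {};
	attachments[0].aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
	attachments[0].colorAttachment = 0;
	attachments[0].clearValue.color = { { 0.0f, 0.0f, 0.0f, 1.0f } };
	attachments[1].aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT;
	attachments[1].clearValue.depthStencil = { 1.0f, 0 };

	VkClearRect rect = {};
	rect.rect.extent = { (uint32_t)glConfig.vidWidth, (uint32_t)glConfig.vidHeight };
	rect.layerCount = 1;
	vkCmdClearAttachments(vk.command_buffer, 2, attachments, 1, &rect);
}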