DX12: cleanup.

Artem Kharytoniuk 2017-12-25 10:01:32 +01:00
parent ac233e7ab9
commit 52ac138485
4 changed files with 94 additions and 87 deletions

View File

@@ -386,8 +386,9 @@ static void RB_Hyperspace( void ) {
qglClearColor( c, c, c, 1 );
qglClear( GL_COLOR_BUFFER_BIT );
// VULKAN
float color[4] = { c, c, c, 1 };
vk_clear_attachments(false, true, color);
// DX12
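The DX12 branch of this hunk is truncated in this view. For orientation only, a raw Direct3D 12 clear of the current color attachment looks roughly like the sketch below; cmd_list and rtv_handle are placeholder names, not the engine's own identifiers.

#include <d3d12.h>

// Hedged sketch: clear the bound render target to the hyperspace fog color.
// The engine's DX12 backend presumably wraps this in its own helper; the
// parameter names here are hypothetical.
static void clear_color_attachment_sketch(ID3D12GraphicsCommandList* cmd_list,
                                          D3D12_CPU_DESCRIPTOR_HANDLE rtv_handle,
                                          const float color[4])
{
	// NumRects == 0 with pRects == nullptr clears the entire view.
	cmd_list->ClearRenderTargetView(rtv_handle, color, 0, nullptr);
}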

View File

@@ -701,65 +701,6 @@ static int upload_gl_image(const Image_Upload_Data& upload_data, int texture_add
return internal_format;
}
// DX12
static Dx_Image upload_dx_image(const Image_Upload_Data& upload_data, bool repeat_texture, int image_index) {
int w = upload_data.base_level_width;
int h = upload_data.base_level_height;
bool has_alpha = false;
for (int i = 0; i < w * h; i++) {
if (upload_data.buffer[i*4 + 3] != 255) {
has_alpha = true;
break;
}
}
byte* buffer = upload_data.buffer;
Dx_Image_Format format = IMAGE_FORMAT_RGBA8;
int bytes_per_pixel = 4;
if (r_texturebits->integer <= 16) {
buffer = (byte*) ri.Hunk_AllocateTempMemory( upload_data.buffer_size / 2 );
format = has_alpha ? IMAGE_FORMAT_BGRA4 : IMAGE_FORMAT_BGR5A1;
bytes_per_pixel = 2;
}
if (format == IMAGE_FORMAT_BGR5A1) {
auto p = (uint16_t*)buffer;
for (int i = 0; i < upload_data.buffer_size; i += 4, p++) {
byte r = upload_data.buffer[i+0];
byte g = upload_data.buffer[i+1];
byte b = upload_data.buffer[i+2];
*p = (uint32_t((b/255.0) * 31.0 + 0.5) << 0) |
(uint32_t((g/255.0) * 31.0 + 0.5) << 5) |
(uint32_t((r/255.0) * 31.0 + 0.5) << 10) |
(1 << 15);
}
} else if (format == IMAGE_FORMAT_BGRA4) {
auto p = (uint16_t*)buffer;
for (int i = 0; i < upload_data.buffer_size; i += 4, p++) {
byte r = upload_data.buffer[i+0];
byte g = upload_data.buffer[i+1];
byte b = upload_data.buffer[i+2];
byte a = upload_data.buffer[i+3];
*p = (uint32_t((b/255.0) * 15.0 + 0.5) << 0) |
(uint32_t((g/255.0) * 15.0 + 0.5) << 4) |
(uint32_t((r/255.0) * 15.0 + 0.5) << 8) |
(uint32_t((a/255.0) * 15.0 + 0.5) << 12);
}
}
Dx_Image image = dx_create_image(w, h, format, upload_data.mip_levels, repeat_texture, image_index);
dx_upload_image_data(image.texture, w, h, upload_data.mip_levels, buffer, bytes_per_pixel);
if (bytes_per_pixel == 2)
ri.Hunk_FreeTempMemory(buffer);
return image;
}
// VULKAN
static Vk_Image upload_vk_image(const Image_Upload_Data& upload_data, bool repeat_texture) {
int w = upload_data.base_level_width;
@@ -819,6 +760,65 @@ static Vk_Image upload_vk_image(const Image_Upload_Data& upload_data, bool repea
return image;
}
// DX12
static Dx_Image upload_dx_image(const Image_Upload_Data& upload_data, bool repeat_texture, int image_index) {
int w = upload_data.base_level_width;
int h = upload_data.base_level_height;
bool has_alpha = false;
for (int i = 0; i < w * h; i++) {
if (upload_data.buffer[i*4 + 3] != 255) {
has_alpha = true;
break;
}
}
byte* buffer = upload_data.buffer;
Dx_Image_Format format = IMAGE_FORMAT_RGBA8;
int bytes_per_pixel = 4;
if (r_texturebits->integer <= 16) {
buffer = (byte*) ri.Hunk_AllocateTempMemory( upload_data.buffer_size / 2 );
format = has_alpha ? IMAGE_FORMAT_BGRA4 : IMAGE_FORMAT_BGR5A1;
bytes_per_pixel = 2;
}
if (format == IMAGE_FORMAT_BGR5A1) {
auto p = (uint16_t*)buffer;
for (int i = 0; i < upload_data.buffer_size; i += 4, p++) {
byte r = upload_data.buffer[i+0];
byte g = upload_data.buffer[i+1];
byte b = upload_data.buffer[i+2];
*p = (uint32_t((b/255.0) * 31.0 + 0.5) << 0) |
(uint32_t((g/255.0) * 31.0 + 0.5) << 5) |
(uint32_t((r/255.0) * 31.0 + 0.5) << 10) |
(1 << 15);
}
} else if (format == IMAGE_FORMAT_BGRA4) {
auto p = (uint16_t*)buffer;
for (int i = 0; i < upload_data.buffer_size; i += 4, p++) {
byte r = upload_data.buffer[i+0];
byte g = upload_data.buffer[i+1];
byte b = upload_data.buffer[i+2];
byte a = upload_data.buffer[i+3];
*p = (uint32_t((b/255.0) * 15.0 + 0.5) << 0) |
(uint32_t((g/255.0) * 15.0 + 0.5) << 4) |
(uint32_t((r/255.0) * 15.0 + 0.5) << 8) |
(uint32_t((a/255.0) * 15.0 + 0.5) << 12);
}
}
Dx_Image image = dx_create_image(w, h, format, upload_data.mip_levels, repeat_texture, image_index);
dx_upload_image_data(image.texture, w, h, upload_data.mip_levels, buffer, bytes_per_pixel);
if (bytes_per_pixel == 2)
ri.Hunk_FreeTempMemory(buffer);
return image;
}
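Side note on the 16-bit paths above: the packing math can be checked in isolation. A minimal, self-contained sketch of the same per-pixel conversion (standalone helper names, not the engine's):

#include <cstdint>

// Same rounding/packing as the BGR5A1 and BGRA4 branches above, factored
// into free functions purely for illustration.
static uint16_t pack_bgr5a1(uint8_t r, uint8_t g, uint8_t b) {
	return uint16_t((uint32_t((b/255.0) * 31.0 + 0.5) << 0) |
	                (uint32_t((g/255.0) * 31.0 + 0.5) << 5) |
	                (uint32_t((r/255.0) * 31.0 + 0.5) << 10) |
	                (1u << 15));                  // alpha bit is always set
}

static uint16_t pack_bgra4(uint8_t r, uint8_t g, uint8_t b, uint8_t a) {
	return uint16_t((uint32_t((b/255.0) * 15.0 + 0.5) << 0) |
	                (uint32_t((g/255.0) * 15.0 + 0.5) << 4) |
	                (uint32_t((r/255.0) * 15.0 + 0.5) << 8) |
	                (uint32_t((a/255.0) * 15.0 + 0.5) << 12));
}

// Sanity check: opaque white packs to 0xFFFF in both formats.
// pack_bgr5a1(255, 255, 255) == 0xFFFF
// pack_bgra4(255, 255, 255, 255) == 0xFFFF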
/*
================
R_CreateImage

View File

@@ -392,6 +392,8 @@ void RB_TakeScreenshot( int x, int y, int width, int height, char *fileName ) {
buffer2_ptr += 4;
}
ri.Hunk_FreeTempMemory(buffer2);
} else if (r_renderAPI->integer == 2) { // DX12
ri.Printf(PRINT_WARNING, "RB_TakeScreenshot is not implemented for DX12\n");
}
// swap rgb to bgr
@@ -424,6 +426,8 @@ void RB_TakeScreenshotJPEG( int x, int y, int width, int height, char *fileName
qglReadPixels( x, y, width, height, GL_RGBA, GL_UNSIGNED_BYTE, buffer );
} else if (r_renderAPI->integer == 1) { // VULKAN
vk_read_pixels(buffer);
} else if (r_renderAPI->integer == 2) { // DX12
ri.Printf(PRINT_WARNING, "RB_TakeScreenshotJPEG is not implemented for DX12\n");
}
// gamma correct
@@ -574,6 +578,8 @@ void R_LevelShot( void ) {
buffer2_ptr += 4;
}
ri.Hunk_FreeTempMemory(buffer2);
} else if (r_renderAPI->integer == 2) { // DX12
ri.Printf(PRINT_WARNING, "R_LevelShot is not implemented for DX12\n");
}
// resample from source
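The DX12 warnings above mark the missing readback paths. For orientation only, the usual Direct3D 12 pattern is to copy the render target into a READBACK-heap buffer, fence the copy, and Map() the buffer on the CPU. A hedged sketch under assumed names (device, cmd_list, render_target), not the engine's actual code:

#include <d3d12.h>
#include <cstdint>

// Hypothetical sketch: record a copy of a render target into a readback
// buffer so screenshot code could read the pixels on the CPU. The caller
// still has to transition the render target to COPY_SOURCE, execute and
// fence the command list, then Map() the returned buffer.
static void copy_render_target_to_readback_sketch(
	ID3D12Device* device,
	ID3D12GraphicsCommandList* cmd_list,
	ID3D12Resource* render_target,
	ID3D12Resource** readback_buffer,                // created here
	D3D12_PLACED_SUBRESOURCE_FOOTPRINT* footprint)   // row pitch/offset info
{
	D3D12_RESOURCE_DESC desc = render_target->GetDesc();
	UINT64 total_bytes = 0;
	device->GetCopyableFootprints(&desc, 0, 1, 0, footprint, nullptr, nullptr, &total_bytes);

	D3D12_HEAP_PROPERTIES heap_props = {};
	heap_props.Type = D3D12_HEAP_TYPE_READBACK;

	D3D12_RESOURCE_DESC buf_desc = {};
	buf_desc.Dimension = D3D12_RESOURCE_DIMENSION_BUFFER;
	buf_desc.Width = total_bytes;
	buf_desc.Height = 1;
	buf_desc.DepthOrArraySize = 1;
	buf_desc.MipLevels = 1;
	buf_desc.SampleDesc.Count = 1;
	buf_desc.Layout = D3D12_TEXTURE_LAYOUT_ROW_MAJOR;

	device->CreateCommittedResource(&heap_props, D3D12_HEAP_FLAG_NONE, &buf_desc,
		D3D12_RESOURCE_STATE_COPY_DEST, nullptr,
		__uuidof(ID3D12Resource), reinterpret_cast<void**>(readback_buffer));

	D3D12_TEXTURE_COPY_LOCATION src = {};
	src.pResource = render_target;
	src.Type = D3D12_TEXTURE_COPY_TYPE_SUBRESOURCE_INDEX;
	src.SubresourceIndex = 0;

	D3D12_TEXTURE_COPY_LOCATION dst = {};
	dst.pResource = *readback_buffer;
	dst.Type = D3D12_TEXTURE_COPY_TYPE_PLACED_FOOTPRINT;
	dst.PlacedFootprint = *footprint;

	cmd_list->CopyTextureRegion(&dst, 0, 0, 0, &src, nullptr);
}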
@@ -844,6 +850,11 @@ void GfxInfo_f( void )
ri.Printf(PRINT_ALL, "Vk device name: %s\n", props.deviceName);
}
// DX12
if (dx.active) {
ri.Printf( PRINT_ALL, "\nActive 3D API: DirectX 12\n" );
}
//
// Info that doesn't depend on r_renderAPI
//
@@ -1160,6 +1171,11 @@ void RE_EndRegistration( void ) {
if (vk.active) {
ri.Printf(PRINT_ALL, "Vulkan: pipelines create time %d msec\n", (int)(vk_world.pipeline_create_time * 1000));
}
// DX12
if (dx.active) {
ri.Printf(PRINT_ALL, "DX12: pipelines create time %d msec\n", (int)(dx_world.pipeline_create_time * 1000));
}
}

View File

@@ -825,12 +825,19 @@ static void RB_IterateStagesGeneric( shaderCommands_t *input )
}
// VULKAN
if (vk.active) {
VkPipeline pipeline = pStage->vk_pipeline;
if (backEnd.viewParms.isMirror)
pipeline = pStage->vk_mirror_pipeline;
else if (backEnd.viewParms.isPortal)
pipeline = pStage->vk_portal_pipeline;
// DX12
if (vk.active || dx.active) {
VkPipeline vk_pipeline = pStage->vk_pipeline;
ID3D12PipelineState* dx_pipeline = pStage->dx_pipeline;
if (backEnd.viewParms.isMirror) {
vk_pipeline = pStage->vk_mirror_pipeline;
dx_pipeline = pStage->dx_mirror_pipeline;
}
else if (backEnd.viewParms.isPortal) {
vk_pipeline = pStage->vk_portal_pipeline;
dx_pipeline = pStage->dx_portal_pipeline;
}
Vk_Depth_Range depth_range = Vk_Depth_Range::normal;
if (input->shader->isSky) {
@@ -844,29 +851,12 @@ static void RB_IterateStagesGeneric( shaderCommands_t *input )
if (r_lightmap->integer && multitexture)
GL_Bind(tr.whiteImage); // replace diffuse texture with a white one, thus effectively rendering only the lightmap
vk_shade_geometry(pipeline, multitexture, depth_range);
if (vk.active)
vk_shade_geometry(vk_pipeline, multitexture, depth_range);
if (dx.active)
dx_shade_geometry(dx_pipeline, multitexture, depth_range, true, false);
}
// DX12
if (dx.active) {
ID3D12PipelineState* pipeline = pStage->dx_pipeline;
if (backEnd.viewParms.isMirror)
pipeline = pStage->dx_mirror_pipeline;
else if (backEnd.viewParms.isPortal)
pipeline = pStage->dx_portal_pipeline;
Vk_Depth_Range depth_range = Vk_Depth_Range::normal;
if (input->shader->isSky) {
depth_range = Vk_Depth_Range::force_one;
if (r_showsky->integer)
depth_range = Vk_Depth_Range::force_zero;
} else if (backEnd.currentEntity->e.renderfx & RF_DEPTHHACK) {
depth_range = Vk_Depth_Range::weapon;
}
dx_shade_geometry(pipeline, multitexture, depth_range, true, false);
}
// allow skipping out to show just lightmaps during development
if ( r_lightmap->integer && ( pStage->bundle[0].isLightmap || pStage->bundle[1].isLightmap ) )
{
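A possible follow-up to the refactor above (not part of this commit): the depth-range selection is identical for the Vulkan and DX12 paths, so it could be factored into one helper. A sketch using only the names already visible in the diff; it relies on the renderer's own headers for the types.

// Hypothetical helper, not in this commit.
static Vk_Depth_Range select_depth_range(const shaderCommands_t* input) {
	if (input->shader->isSky)
		return r_showsky->integer ? Vk_Depth_Range::force_zero
		                          : Vk_Depth_Range::force_one;
	if (backEnd.currentEntity->e.renderfx & RF_DEPTHHACK)
		return Vk_Depth_Range::weapon;
	return Vk_Depth_Range::normal;
}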