2021-01-25 15:55:45 +00:00
|
|
|
/*
|
|
|
|
	vulkan_model_alias.c
|
|
|
|
|
|
|
|
Alias model processing for Vulkan
|
|
|
|
|
|
|
|
Copyright (C) 2021 Bill Currie <bill@taniwha.org>
|
|
|
|
|
|
|
|
Author: Bill Currie <bill@taniwha.org>
|
|
|
|
Date: 2021/1/24
|
|
|
|
|
|
|
|
This program is free software; you can redistribute it and/or
|
|
|
|
modify it under the terms of the GNU General Public License
|
|
|
|
as published by the Free Software Foundation; either version 2
|
|
|
|
of the License, or (at your option) any later version.
|
|
|
|
|
|
|
|
This program is distributed in the hope that it will be useful,
|
|
|
|
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
|
|
|
|
|
|
|
|
See the GNU General Public License for more details.
|
|
|
|
|
|
|
|
You should have received a copy of the GNU General Public License
|
|
|
|
along with this program; if not, write to:
|
|
|
|
|
|
|
|
Free Software Foundation, Inc.
|
|
|
|
59 Temple Place - Suite 330
|
|
|
|
Boston, MA 02111-1307, USA
|
|
|
|
|
|
|
|
*/
|
|
|
|
#ifdef HAVE_CONFIG_H
|
|
|
|
# include "config.h"
|
|
|
|
#endif
|
|
|
|
|
|
|
|
#ifdef HAVE_STRING_H
|
|
|
|
# include <string.h>
|
|
|
|
#endif
|
|
|
|
#ifdef HAVE_STRINGS_H
|
|
|
|
# include <strings.h>
|
|
|
|
#endif
|
|
|
|
|
|
|
|
#include <stdlib.h>
|
|
|
|
|
2021-12-02 13:48:50 +00:00
|
|
|
#include "QF/cvar.h"
|
2021-01-25 15:55:45 +00:00
|
|
|
#include "QF/va.h"
|
|
|
|
|
|
|
|
#include "QF/modelgen.h"
|
|
|
|
#include "QF/vid.h"
|
|
|
|
#include "QF/Vulkan/qf_alias.h"
|
|
|
|
#include "QF/Vulkan/qf_texture.h"
|
2021-02-01 15:11:47 +00:00
|
|
|
#include "QF/Vulkan/barrier.h"
|
2021-01-25 15:55:45 +00:00
|
|
|
#include "QF/Vulkan/buffer.h"
|
|
|
|
#include "QF/Vulkan/device.h"
|
2021-01-31 10:58:55 +00:00
|
|
|
#include "QF/Vulkan/debug.h"
|
2021-02-01 15:11:47 +00:00
|
|
|
#include "QF/Vulkan/image.h"
|
2021-01-25 15:55:45 +00:00
|
|
|
#include "QF/Vulkan/instance.h"
|
|
|
|
#include "QF/Vulkan/staging.h"
|
|
|
|
|
|
|
|
#include "mod_internal.h"
|
|
|
|
#include "r_internal.h"
|
|
|
|
#include "vid_vulkan.h"
|
|
|
|
|
|
|
|
static vec3_t vertex_normals[NUMVERTEXNORMALS] = {
|
|
|
|
#include "anorms.h"
|
|
|
|
};
|
|
|
|
|
2021-01-26 11:33:51 +00:00
|
|
|
static void
|
|
|
|
skin_clear (int skin_offset, aliashdr_t *hdr, vulkan_ctx_t *ctx)
|
|
|
|
{
|
2021-02-01 15:11:47 +00:00
|
|
|
qfv_device_t *device = ctx->device;
|
|
|
|
qfv_devfuncs_t *dfunc = device->funcs;
|
2021-02-02 10:53:36 +00:00
|
|
|
qfv_alias_skin_t *skin = (qfv_alias_skin_t *) ((byte *) hdr + skin_offset);
|
2021-02-01 15:11:47 +00:00
|
|
|
|
2021-12-08 15:25:50 +00:00
|
|
|
Vulkan_AliasRemoveSkin (ctx, skin);
|
2021-02-01 15:11:47 +00:00
|
|
|
dfunc->vkDestroyImageView (device->dev, skin->view, 0);
|
|
|
|
dfunc->vkDestroyImage (device->dev, skin->image, 0);
|
|
|
|
dfunc->vkFreeMemory (device->dev, skin->memory, 0);
|
2021-01-26 11:33:51 +00:00
|
|
|
}
|
|
|
|
|
2021-01-25 15:55:45 +00:00
|
|
|
static void
|
|
|
|
vulkan_alias_clear (model_t *m, void *data)
|
|
|
|
{
|
2021-01-26 04:46:33 +00:00
|
|
|
vulkan_ctx_t *ctx = data;
|
|
|
|
qfv_device_t *device = ctx->device;
|
|
|
|
qfv_devfuncs_t *dfunc = device->funcs;
|
|
|
|
aliashdr_t *hdr;
|
|
|
|
qfv_alias_mesh_t *mesh;
|
|
|
|
|
2021-04-02 16:14:44 +00:00
|
|
|
QFV_DeviceWaitIdle (device);
|
|
|
|
|
2021-01-26 04:46:33 +00:00
|
|
|
m->needload = true; //FIXME is this right?
|
|
|
|
if (!(hdr = m->aliashdr)) {
|
|
|
|
hdr = Cache_Get (&m->cache);
|
|
|
|
}
|
|
|
|
mesh = (qfv_alias_mesh_t *) ((byte *) hdr + hdr->commands);
|
|
|
|
dfunc->vkDestroyBuffer (device->dev, mesh->vertex_buffer, 0);
|
|
|
|
dfunc->vkDestroyBuffer (device->dev, mesh->uv_buffer, 0);
|
|
|
|
dfunc->vkDestroyBuffer (device->dev, mesh->index_buffer, 0);
|
|
|
|
dfunc->vkFreeMemory (device->dev, mesh->memory, 0);
|
|
|
|
|
|
|
|
__auto_type skins = (maliasskindesc_t *) ((byte *) hdr + hdr->skindesc);
|
|
|
|
for (int i = 0; i < hdr->mdl.numskins; i++) {
|
|
|
|
if (skins[i].type == ALIAS_SKIN_GROUP) {
|
|
|
|
__auto_type group = (maliasskingroup_t *)
|
|
|
|
((byte *) hdr + skins[i].skin);
|
|
|
|
for (int j = 0; j < group->numskins; j++) {
|
2021-01-26 11:33:51 +00:00
|
|
|
skin_clear (group->skindescs[j].skin, hdr, ctx);
|
2021-01-26 04:46:33 +00:00
|
|
|
}
|
|
|
|
} else {
|
2021-01-26 11:33:51 +00:00
|
|
|
skin_clear (skins[i].skin, hdr, ctx);
|
2021-01-26 04:46:33 +00:00
|
|
|
}
|
|
|
|
}
|
2021-01-25 15:55:45 +00:00
|
|
|
}
|
|
|
|
|
2022-11-15 04:09:41 +00:00
|
|
|
#define SKIN_LAYERS 3
|
|
|
|
|
[model] Make alias skin loading a batch operation
Really, this won't make all that much difference because alias models
with more than one skin are quite rare, and those with animated skin
groups are even rarer. However, for those models that do have more than
one skin, it will allow for reduced allocation overheads, and when
supported (glsl, vulkan, maybe gl), loading all the skins into an array
texture (since all skins are the same size, though external skins may
vary), but that's not implemented yet, this just wraps the old one skin
at a time code.
2022-04-04 06:38:27 +00:00
|
|
|
static void *
|
2021-02-01 12:11:45 +00:00
|
|
|
Vulkan_Mod_LoadSkin (mod_alias_ctx_t *alias_ctx, byte *skinpix, int skinsize,
|
|
|
|
int snum, int gnum, qboolean group,
|
|
|
|
maliasskindesc_t *skindesc, vulkan_ctx_t *ctx)
|
2021-01-25 15:55:45 +00:00
|
|
|
{
|
2021-12-02 13:48:50 +00:00
|
|
|
qfvPushDebug (ctx, va (ctx->va_ctx, "alias.load_skin: %s", alias_ctx->mod->name));
|
2021-02-01 15:11:47 +00:00
|
|
|
qfv_device_t *device = ctx->device;
|
|
|
|
qfv_devfuncs_t *dfunc = device->funcs;
|
2021-02-01 12:11:45 +00:00
|
|
|
aliashdr_t *header = alias_ctx->header;
|
2021-02-02 10:53:36 +00:00
|
|
|
qfv_alias_skin_t *skin;
|
2021-01-25 15:55:45 +00:00
|
|
|
byte *tskin;
|
|
|
|
int w, h;
|
|
|
|
|
2021-07-28 06:01:45 +00:00
|
|
|
skin = Hunk_Alloc (0, sizeof (qfv_alias_skin_t));
|
2022-11-15 02:47:28 +00:00
|
|
|
QuatSet (TOP_RANGE + 7, BOTTOM_RANGE + 7, 0, 0, skin->colors);
|
2021-02-01 12:11:45 +00:00
|
|
|
skindesc->skin = (byte *) skin - (byte *) header;
|
2021-01-26 04:46:33 +00:00
|
|
|
//FIXME move all skins into arrays(?)
|
2021-02-01 12:11:45 +00:00
|
|
|
w = header->mdl.skinwidth;
|
|
|
|
h = header->mdl.skinheight;
|
2021-01-26 11:33:51 +00:00
|
|
|
tskin = malloc (2 * skinsize);
|
2021-01-27 03:15:45 +00:00
|
|
|
memcpy (tskin, skinpix, skinsize);
|
2021-01-25 15:55:45 +00:00
|
|
|
Mod_FloodFillSkin (tskin, w, h);
|
2021-01-26 11:33:51 +00:00
|
|
|
|
2021-02-01 15:11:47 +00:00
|
|
|
int mipLevels = QFV_MipLevels (w, h);
|
|
|
|
VkExtent3D extent = { w, h, 1 };
|
|
|
|
skin->image = QFV_CreateImage (device, 0, VK_IMAGE_TYPE_2D,
|
|
|
|
VK_FORMAT_R8G8B8A8_UNORM, extent,
|
2022-11-15 04:09:41 +00:00
|
|
|
mipLevels, 3, VK_SAMPLE_COUNT_1_BIT,
|
2021-02-01 15:11:47 +00:00
|
|
|
VK_IMAGE_USAGE_SAMPLED_BIT
|
|
|
|
| VK_IMAGE_USAGE_TRANSFER_DST_BIT
|
|
|
|
| VK_IMAGE_USAGE_TRANSFER_SRC_BIT);
|
|
|
|
QFV_duSetObjectName (device, VK_OBJECT_TYPE_IMAGE, skin->image,
|
|
|
|
va (ctx->va_ctx, "image:%s:%d:%d",
|
|
|
|
alias_ctx->mod->name, snum, gnum));
|
|
|
|
skin->memory = QFV_AllocImageMemory (device, skin->image,
|
|
|
|
VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT,
|
|
|
|
0, 0);
|
|
|
|
QFV_duSetObjectName (device, VK_OBJECT_TYPE_DEVICE_MEMORY, skin->memory,
|
|
|
|
va (ctx->va_ctx, "memory:%s:%d:%d",
|
|
|
|
alias_ctx->mod->name, snum, gnum));
|
|
|
|
QFV_BindImageMemory (device, skin->image, skin->memory, 0);
|
|
|
|
skin->view = QFV_CreateImageView (device, skin->image,
|
|
|
|
VK_IMAGE_VIEW_TYPE_2D_ARRAY,
|
|
|
|
VK_FORMAT_R8G8B8A8_UNORM,
|
|
|
|
VK_IMAGE_ASPECT_COLOR_BIT);
|
|
|
|
QFV_duSetObjectName (device, VK_OBJECT_TYPE_IMAGE_VIEW, skin->view,
|
|
|
|
va (ctx->va_ctx, "iview:%s:%d:%d",
|
|
|
|
alias_ctx->mod->name, snum, gnum));
|
|
|
|
|
|
|
|
qfv_stagebuf_t *stage = QFV_CreateStagingBuffer (device, "alias stage",
|
2022-11-15 04:09:41 +00:00
|
|
|
SKIN_LAYERS * skinsize * 4,
|
2021-02-01 15:11:47 +00:00
|
|
|
ctx->cmdpool);
|
|
|
|
qfv_packet_t *packet = QFV_PacketAcquire (stage);
|
|
|
|
byte *base_data = QFV_PacketExtend (packet, skinsize * 4);
|
|
|
|
byte *glow_data = QFV_PacketExtend (packet, skinsize * 4);
|
2022-11-15 04:09:41 +00:00
|
|
|
byte *cmap_data = QFV_PacketExtend (packet, skinsize * 4);
|
2021-02-01 15:11:47 +00:00
|
|
|
|
2021-12-07 03:51:33 +00:00
|
|
|
Mod_CalcFullbright (tskin + skinsize, tskin, skinsize);
|
2021-02-01 15:11:47 +00:00
|
|
|
Vulkan_ExpandPalette (glow_data, tskin + skinsize, vid.palette, 1,
|
|
|
|
skinsize);
|
|
|
|
Mod_ClearFullbright (tskin, tskin, skinsize);
|
|
|
|
|
2022-11-15 04:09:41 +00:00
|
|
|
Skin_CalcTopColors (cmap_data + 0, tskin, skinsize, 4);
|
|
|
|
Skin_CalcTopMask (cmap_data + 1, tskin, skinsize, 4);
|
|
|
|
Skin_CalcBottomColors (cmap_data + 2, tskin, skinsize, 4);
|
|
|
|
Skin_CalcBottomMask (cmap_data + 3, tskin, skinsize, 4);
|
2021-02-01 15:11:47 +00:00
|
|
|
Skin_ClearTopColors (tskin, tskin, skinsize);
|
|
|
|
Skin_ClearBottomColors (tskin, tskin, skinsize);
|
|
|
|
|
|
|
|
Vulkan_ExpandPalette (base_data, tskin, vid.palette, 1, skinsize);
|
|
|
|
|
2021-04-24 03:42:29 +00:00
|
|
|
qfv_imagebarrier_t ib = imageBarriers[qfv_LT_Undefined_to_TransferDst];
|
|
|
|
ib.barrier.image = skin->image;
|
|
|
|
ib.barrier.subresourceRange.levelCount = VK_REMAINING_MIP_LEVELS;
|
|
|
|
ib.barrier.subresourceRange.layerCount = VK_REMAINING_ARRAY_LAYERS;
|
|
|
|
dfunc->vkCmdPipelineBarrier (packet->cmd, ib.srcStages, ib.dstStages,
|
2021-02-01 15:11:47 +00:00
|
|
|
0, 0, 0, 0, 0,
|
2021-04-24 03:42:29 +00:00
|
|
|
1, &ib.barrier);
|
2021-02-01 15:11:47 +00:00
|
|
|
|
|
|
|
VkBufferImageCopy copy = {
|
|
|
|
packet->offset, 0, 0,
|
2022-11-15 04:09:41 +00:00
|
|
|
{VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, SKIN_LAYERS},
|
2021-02-01 15:11:47 +00:00
|
|
|
{0, 0, 0}, {w, h, 1},
|
|
|
|
};
|
|
|
|
dfunc->vkCmdCopyBufferToImage (packet->cmd, packet->stage->buffer,
|
|
|
|
skin->image,
|
|
|
|
VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
|
|
|
|
1, ©);
|
|
|
|
|
|
|
|
if (mipLevels == 1) {
|
2021-04-24 03:42:29 +00:00
|
|
|
ib = imageBarriers[qfv_LT_TransferDst_to_ShaderReadOnly];
|
|
|
|
ib.barrier.image = skin->image;
|
|
|
|
ib.barrier.subresourceRange.levelCount = VK_REMAINING_MIP_LEVELS;
|
|
|
|
ib.barrier.subresourceRange.layerCount = VK_REMAINING_ARRAY_LAYERS;
|
|
|
|
dfunc->vkCmdPipelineBarrier (packet->cmd, ib.srcStages, ib.dstStages,
|
2021-02-01 15:11:47 +00:00
|
|
|
0, 0, 0, 0, 0,
|
2021-04-24 03:42:29 +00:00
|
|
|
1, &ib.barrier);
|
2021-02-01 15:11:47 +00:00
|
|
|
} else {
|
|
|
|
QFV_GenerateMipMaps (device, packet->cmd, skin->image,
|
2022-11-15 04:09:41 +00:00
|
|
|
mipLevels, w, h, SKIN_LAYERS);
|
2021-01-26 11:33:51 +00:00
|
|
|
}
|
2021-02-01 15:11:47 +00:00
|
|
|
QFV_PacketSubmit (packet);
|
|
|
|
QFV_DestroyStagingBuffer (stage);
|
2021-01-26 11:33:51 +00:00
|
|
|
|
2021-01-25 15:55:45 +00:00
|
|
|
free (tskin);
|
2021-01-26 11:33:51 +00:00
|
|
|
|
2021-12-08 15:25:50 +00:00
|
|
|
Vulkan_AliasAddSkin (ctx, skin);
|
|
|
|
|
2021-12-02 13:48:50 +00:00
|
|
|
qfvPopDebug (ctx);
|
2021-01-27 07:13:37 +00:00
|
|
|
return skinpix + skinsize;
|
2021-01-25 15:55:45 +00:00
|
|
|
}
|
|
|
|
|
[model] Make alias skin loading a batch operation
Really, this won't make all that much difference because alias models
with more than one skin are quite rare, and those with animated skin
groups are even rarer. However, for those models that do have more than
one skin, it will allow for reduced allocation overheads, and when
supported (glsl, vulkan, maybe gl), loading all the skins into an array
texture (since all skins are the same size, though external skins may
vary), but that's not implemented yet, this just wraps the old one skin
at a time code.
2022-04-04 06:38:27 +00:00
|
|
|
void
|
|
|
|
Vulkan_Mod_LoadAllSkins (mod_alias_ctx_t *alias_ctx, vulkan_ctx_t *ctx)
|
|
|
|
{
|
|
|
|
aliashdr_t *header = alias_ctx->header;
|
|
|
|
int skinsize = header->mdl.skinwidth * header->mdl.skinheight;
|
|
|
|
|
|
|
|
for (size_t i = 0; i < alias_ctx->skins.size; i++) {
|
|
|
|
__auto_type skin = alias_ctx->skins.a + i;
|
|
|
|
Vulkan_Mod_LoadSkin (alias_ctx, skin->texels, skinsize,
|
|
|
|
skin->skin_num, skin->group_num,
|
|
|
|
skin->group_num != -1, skin->skindesc, ctx);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-01-25 15:55:45 +00:00
|
|
|
void
|
2021-02-01 12:11:45 +00:00
|
|
|
Vulkan_Mod_FinalizeAliasModel (mod_alias_ctx_t *alias_ctx, vulkan_ctx_t *ctx)
|
2021-01-25 15:55:45 +00:00
|
|
|
{
|
2021-02-01 12:11:45 +00:00
|
|
|
alias_ctx->mod->clear = vulkan_alias_clear;
|
|
|
|
alias_ctx->mod->data = ctx;
|
2021-01-25 15:55:45 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
void
|
2021-02-01 12:11:45 +00:00
|
|
|
Vulkan_Mod_LoadExternalSkins (mod_alias_ctx_t *alias_ctx, vulkan_ctx_t *ctx)
|
2021-01-25 15:55:45 +00:00
|
|
|
{
|
|
|
|
}
|
|
|
|
|
2021-01-26 04:46:33 +00:00
|
|
|
static size_t
|
|
|
|
get_buffer_size (qfv_device_t *device, VkBuffer buffer)
|
|
|
|
{
|
|
|
|
qfv_devfuncs_t *dfunc = device->funcs;
|
|
|
|
size_t size;
|
|
|
|
size_t align;
|
|
|
|
|
|
|
|
VkMemoryRequirements requirements;
|
|
|
|
dfunc->vkGetBufferMemoryRequirements (device->dev, buffer, &requirements);
|
|
|
|
size = requirements.size;
|
|
|
|
align = requirements.alignment - 1;
|
|
|
|
size = (size + align) & ~(align);
|
|
|
|
return size;
|
|
|
|
}
|
|
|
|
|
2021-01-25 15:55:45 +00:00
|
|
|
void
|
2021-02-01 12:11:45 +00:00
|
|
|
Vulkan_Mod_MakeAliasModelDisplayLists (mod_alias_ctx_t *alias_ctx, void *_m,
|
2021-01-25 15:55:45 +00:00
|
|
|
int _s, int extra, vulkan_ctx_t *ctx)
|
|
|
|
{
|
2021-02-01 12:11:45 +00:00
|
|
|
aliashdr_t *header = alias_ctx->header;
|
2021-01-25 15:55:45 +00:00
|
|
|
qfv_device_t *device = ctx->device;
|
|
|
|
qfv_devfuncs_t *dfunc = device->funcs;
|
|
|
|
aliasvrt_t *verts;
|
|
|
|
aliasuv_t *uv;
|
|
|
|
trivertx_t *pv;
|
|
|
|
int *indexmap;
|
|
|
|
uint32_t *indices;
|
|
|
|
int numverts;
|
|
|
|
int numtris;
|
|
|
|
int i, j;
|
|
|
|
int pose;
|
|
|
|
vec3_t pos;
|
|
|
|
|
2021-02-01 12:11:45 +00:00
|
|
|
if (header->mdl.ident == HEADER_MDL16)
|
|
|
|
VectorScale (header->mdl.scale, 1/256.0, header->mdl.scale);
|
2021-01-26 04:46:33 +00:00
|
|
|
|
2021-02-01 12:11:45 +00:00
|
|
|
numverts = header->mdl.numverts;
|
|
|
|
numtris = header->mdl.numtris;
|
2021-01-25 15:55:45 +00:00
|
|
|
|
|
|
|
// initialize indexmap to -1 (unduplicated). any other value indicates
|
|
|
|
// both that the vertex has been duplicated and the index of the
|
|
|
|
// duplicate vertex.
|
|
|
|
indexmap = malloc (numverts * sizeof (int));
|
|
|
|
memset (indexmap, -1, numverts * sizeof (int));
|
|
|
|
|
|
|
|
// check for onseam verts, and duplicate any that are associated with
|
|
|
|
// back-facing triangles
|
|
|
|
for (i = 0; i < numtris; i++) {
|
|
|
|
for (j = 0; j < 3; j++) {
|
2021-02-01 12:11:45 +00:00
|
|
|
int vind = alias_ctx->triangles.a[i].vertindex[j];
|
|
|
|
if (alias_ctx->stverts.a[vind].onseam
|
|
|
|
&& !alias_ctx->triangles.a[i].facesfront) {
|
2021-01-25 15:55:45 +00:00
|
|
|
// duplicate the vertex if it has not alreaddy been
|
|
|
|
// duplicated
|
|
|
|
if (indexmap[vind] == -1) {
|
|
|
|
indexmap[vind] = numverts++;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// we now know exactly how many vertices we need, so built the vertex
|
|
|
|
// and index data arrays
|
|
|
|
// The layout is:
|
|
|
|
// vbuf:{vertex, normal} * (numposes * numverts)
|
|
|
|
// uvbuf:{uv} * (numverts)
|
|
|
|
// ibuf:{index} * (numtris * 3)
|
|
|
|
// numverts includes the duplicated seam vertices.
|
|
|
|
// The vertex buffer will be bound with various offsets based on the
|
|
|
|
// current and previous pose, uvbuff "statically" bound as uvs are not
|
|
|
|
// animated by pose, and the same for ibuf: indices will never change for
|
|
|
|
// the mesh
|
2021-02-01 12:11:45 +00:00
|
|
|
size_t vert_count = numverts * header->numposes;
|
2021-01-25 15:55:45 +00:00
|
|
|
size_t vert_size = vert_count * sizeof (aliasvrt_t);
|
|
|
|
size_t uv_size = numverts * sizeof (aliasuv_t);
|
2021-01-26 04:46:33 +00:00
|
|
|
size_t ind_size = 3 * numtris * sizeof (uint32_t);
|
2021-01-25 15:55:45 +00:00
|
|
|
|
2021-01-26 04:46:33 +00:00
|
|
|
VkBuffer vbuff = QFV_CreateBuffer (device, vert_size,
|
2021-01-25 15:55:45 +00:00
|
|
|
VK_BUFFER_USAGE_TRANSFER_DST_BIT
|
|
|
|
| VK_BUFFER_USAGE_VERTEX_BUFFER_BIT);
|
2021-01-26 04:46:33 +00:00
|
|
|
VkBuffer uvbuff = QFV_CreateBuffer (device, uv_size,
|
2021-01-25 15:55:45 +00:00
|
|
|
VK_BUFFER_USAGE_TRANSFER_DST_BIT
|
|
|
|
| VK_BUFFER_USAGE_VERTEX_BUFFER_BIT);
|
2021-01-26 04:46:33 +00:00
|
|
|
VkBuffer ibuff = QFV_CreateBuffer (device, ind_size,
|
2021-01-25 15:55:45 +00:00
|
|
|
VK_BUFFER_USAGE_TRANSFER_DST_BIT
|
|
|
|
| VK_BUFFER_USAGE_INDEX_BUFFER_BIT);
|
2021-01-31 10:58:55 +00:00
|
|
|
QFV_duSetObjectName (device, VK_OBJECT_TYPE_BUFFER, vbuff,
|
|
|
|
va (ctx->va_ctx, "buffer:alias:vertex:%s",
|
2021-02-01 12:11:45 +00:00
|
|
|
alias_ctx->mod->name));
|
2021-01-31 10:58:55 +00:00
|
|
|
QFV_duSetObjectName (device, VK_OBJECT_TYPE_BUFFER, uvbuff,
|
|
|
|
va (ctx->va_ctx, "buffer:alias:uv:%s",
|
2021-02-01 12:11:45 +00:00
|
|
|
alias_ctx->mod->name));
|
2021-01-31 10:58:55 +00:00
|
|
|
QFV_duSetObjectName (device, VK_OBJECT_TYPE_BUFFER, ibuff,
|
|
|
|
va (ctx->va_ctx, "buffer:alias:index:%s",
|
2021-02-01 12:11:45 +00:00
|
|
|
alias_ctx->mod->name));
|
2021-01-26 04:46:33 +00:00
|
|
|
size_t voffs = 0;
|
|
|
|
size_t uvoffs = voffs + get_buffer_size (device, vbuff);
|
|
|
|
size_t ioffs = uvoffs + get_buffer_size (device, uvbuff);
|
|
|
|
size_t buff_size = ioffs + get_buffer_size (device, ibuff);
|
2021-01-25 15:55:45 +00:00
|
|
|
VkDeviceMemory mem;
|
|
|
|
mem = QFV_AllocBufferMemory (device, vbuff,
|
|
|
|
VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT,
|
|
|
|
buff_size, 0);
|
2021-01-31 10:58:55 +00:00
|
|
|
QFV_duSetObjectName (device, VK_OBJECT_TYPE_DEVICE_MEMORY, mem,
|
|
|
|
va (ctx->va_ctx, "memory:alias:vuvi:%s",
|
2021-02-01 12:11:45 +00:00
|
|
|
alias_ctx->mod->name));
|
2021-01-26 04:46:33 +00:00
|
|
|
QFV_BindBufferMemory (device, vbuff, mem, voffs);
|
|
|
|
QFV_BindBufferMemory (device, uvbuff, mem, uvoffs);
|
|
|
|
QFV_BindBufferMemory (device, ibuff, mem, ioffs);
|
2021-01-25 15:55:45 +00:00
|
|
|
|
2021-01-31 10:58:55 +00:00
|
|
|
qfv_stagebuf_t *stage = QFV_CreateStagingBuffer (device,
|
|
|
|
va (ctx->va_ctx,
|
|
|
|
"alias:%s",
|
2021-02-01 12:11:45 +00:00
|
|
|
alias_ctx->mod->name),
|
2021-01-31 10:58:55 +00:00
|
|
|
buff_size, ctx->cmdpool);
|
2021-01-25 15:55:45 +00:00
|
|
|
qfv_packet_t *packet = QFV_PacketAcquire (stage);
|
|
|
|
verts = QFV_PacketExtend (packet, vert_size);
|
|
|
|
uv = QFV_PacketExtend (packet, uv_size);
|
|
|
|
indices = QFV_PacketExtend (packet, ind_size);
|
|
|
|
|
|
|
|
// populate the uvs, duplicating and shifting any that are on the seam
|
|
|
|
// and associated with back-facing triangles (marked by non-negative
|
|
|
|
// indexmap entry).
|
|
|
|
// the s coordinate is shifted right by half the skin width.
|
2021-02-01 12:11:45 +00:00
|
|
|
for (i = 0; i < header->mdl.numverts; i++) {
|
2021-01-25 15:55:45 +00:00
|
|
|
int vind = indexmap[i];
|
2021-02-01 12:11:45 +00:00
|
|
|
uv[i].u = (float) alias_ctx->stverts.a[i].s / header->mdl.skinwidth;
|
|
|
|
uv[i].v = (float) alias_ctx->stverts.a[i].t / header->mdl.skinheight;
|
2021-01-25 15:55:45 +00:00
|
|
|
if (vind != -1) {
|
|
|
|
uv[vind] = uv[i];
|
|
|
|
uv[vind].u += 0.5;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// poputlate the vertex position and normal data, duplicating for
|
|
|
|
// back-facing on-seam verts (indicated by non-negative indexmap entry)
|
2021-02-01 12:11:45 +00:00
|
|
|
for (i = 0, pose = 0; i < header->numposes; i++, pose += numverts) {
|
|
|
|
for (j = 0; j < header->mdl.numverts; j++) {
|
|
|
|
pv = &alias_ctx->poseverts.a[i][j];
|
2021-01-25 15:55:45 +00:00
|
|
|
if (extra) {
|
2021-02-01 12:11:45 +00:00
|
|
|
VectorMultAdd (pv[header->mdl.numverts].v, 256, pv->v, pos);
|
2021-01-25 15:55:45 +00:00
|
|
|
} else {
|
|
|
|
VectorCopy (pv->v, pos);
|
|
|
|
}
|
2021-02-01 12:11:45 +00:00
|
|
|
VectorCompMultAdd (header->mdl.scale_origin, header->mdl.scale,
|
2021-01-25 15:55:45 +00:00
|
|
|
pos, verts[pose + j].vertex);
|
2021-01-27 07:13:37 +00:00
|
|
|
verts[pose + j].vertex[3] = 1;
|
2021-01-25 15:55:45 +00:00
|
|
|
VectorCopy (vertex_normals[pv->lightnormalindex],
|
|
|
|
verts[pose + j].normal);
|
2021-01-27 07:13:37 +00:00
|
|
|
verts[pose + j].normal[3] = 0;
|
2021-01-25 15:55:45 +00:00
|
|
|
// duplicate on-seam vert associated with back-facing triangle
|
|
|
|
if (indexmap[j] != -1) {
|
|
|
|
verts[pose + indexmap[j]] = verts[pose + j];
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// now build the indices for DrawElements
|
|
|
|
for (i = 0; i < numtris; i++) {
|
|
|
|
for (j = 0; j < 3; j++) {
|
2021-02-01 12:11:45 +00:00
|
|
|
int vind = alias_ctx->triangles.a[i].vertindex[j];
|
[model] Make alias skin loading a batch operation
Really, this won't make all that much difference because alias models
with more than one skin are quite rare, and those with animated skin
groups are even rarer. However, for those models that do have more than
one skin, it will allow for reduced allocation overheads, and when
supported (glsl, vulkan, maybe gl), loading all the skins into an array
texture (since all skins are the same size, though external skins may
vary), but that's not implemented yet, this just wraps the old one skin
at a time code.
2022-04-04 06:38:27 +00:00
|
|
|
// can't use indexmap to do the test because it indicates only
|
|
|
|
// that the vertex has been duplicated, not whether or not
|
|
|
|
// the vertex is the original or the duplicate
|
2021-02-01 12:11:45 +00:00
|
|
|
if (alias_ctx->stverts.a[vind].onseam
|
|
|
|
&& !alias_ctx->triangles.a[i].facesfront) {
|
2021-01-28 05:14:21 +00:00
|
|
|
vind = indexmap[vind];
|
|
|
|
}
|
|
|
|
indices[3 * i + j] = vind;
|
2021-01-25 15:55:45 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
// finished with indexmap
|
|
|
|
free (indexmap);
|
|
|
|
|
2021-02-01 12:11:45 +00:00
|
|
|
header->poseverts = numverts;
|
2021-01-25 15:55:45 +00:00
|
|
|
|
2021-04-24 06:47:31 +00:00
|
|
|
qfv_bufferbarrier_t bb = bufferBarriers[qfv_BB_Unknown_to_TransferWrite];
|
2021-01-25 15:55:45 +00:00
|
|
|
VkBufferMemoryBarrier wr_barriers[] = {
|
2021-04-24 06:47:31 +00:00
|
|
|
bb.barrier, bb.barrier, bb.barrier,
|
2021-01-25 15:55:45 +00:00
|
|
|
};
|
2021-04-24 06:47:31 +00:00
|
|
|
wr_barriers[0].buffer = vbuff;
|
|
|
|
wr_barriers[0].size = vert_size;
|
|
|
|
wr_barriers[1].buffer = uvbuff;
|
|
|
|
wr_barriers[1].size = uv_size;
|
|
|
|
wr_barriers[2].buffer = ibuff;
|
|
|
|
wr_barriers[2].size = ind_size;
|
|
|
|
dfunc->vkCmdPipelineBarrier (packet->cmd, bb.srcStages, bb.dstStages,
|
2021-01-25 15:55:45 +00:00
|
|
|
0, 0, 0, 3, wr_barriers, 0, 0);
|
|
|
|
VkBufferCopy copy_region[] = {
|
|
|
|
{ packet->offset, 0, vert_size },
|
|
|
|
{ packet->offset + vert_size, 0, uv_size },
|
|
|
|
{ packet->offset + vert_size + uv_size, 0, ind_size },
|
|
|
|
};
|
|
|
|
dfunc->vkCmdCopyBuffer (packet->cmd, stage->buffer,
|
|
|
|
vbuff, 1, ©_region[0]);
|
|
|
|
dfunc->vkCmdCopyBuffer (packet->cmd, stage->buffer,
|
|
|
|
uvbuff, 1, ©_region[1]);
|
|
|
|
dfunc->vkCmdCopyBuffer (packet->cmd, stage->buffer,
|
|
|
|
ibuff, 1, ©_region[2]);
|
2021-04-24 06:47:31 +00:00
|
|
|
// both qfv_BB_TransferWrite_to_VertexAttrRead and
|
|
|
|
// qfv_BB_TransferWrite_to_IndexRead have the same stage flags
|
|
|
|
bb = bufferBarriers[qfv_BB_TransferWrite_to_VertexAttrRead];
|
2021-01-25 15:55:45 +00:00
|
|
|
VkBufferMemoryBarrier rd_barriers[] = {
|
2021-04-24 06:47:31 +00:00
|
|
|
bufferBarriers[qfv_BB_TransferWrite_to_VertexAttrRead].barrier,
|
|
|
|
bufferBarriers[qfv_BB_TransferWrite_to_VertexAttrRead].barrier,
|
|
|
|
bufferBarriers[qfv_BB_TransferWrite_to_IndexRead].barrier,
|
2021-01-25 15:55:45 +00:00
|
|
|
};
|
2021-04-24 06:47:31 +00:00
|
|
|
rd_barriers[0].buffer = vbuff;
|
|
|
|
rd_barriers[0].size = vert_size;
|
|
|
|
rd_barriers[1].buffer = uvbuff;
|
|
|
|
rd_barriers[1].size = uv_size;
|
|
|
|
rd_barriers[2].buffer = ibuff;
|
|
|
|
rd_barriers[2].size = ind_size;
|
|
|
|
dfunc->vkCmdPipelineBarrier (packet->cmd, bb.srcStages, bb.dstStages,
|
2021-01-27 07:13:37 +00:00
|
|
|
0, 0, 0, 3, rd_barriers, 0, 0);
|
2021-01-25 15:55:45 +00:00
|
|
|
QFV_PacketSubmit (packet);
|
|
|
|
QFV_DestroyStagingBuffer (stage);
|
|
|
|
|
2021-07-28 06:01:45 +00:00
|
|
|
qfv_alias_mesh_t *mesh = Hunk_Alloc (0, sizeof (qfv_alias_mesh_t));
|
2021-01-25 15:55:45 +00:00
|
|
|
mesh->vertex_buffer = vbuff;
|
|
|
|
mesh->uv_buffer = uvbuff;
|
|
|
|
mesh->index_buffer = ibuff;
|
|
|
|
mesh->memory = mem;
|
2021-02-01 12:11:45 +00:00
|
|
|
header->commands = (byte *) mesh - (byte *) header;
|
2021-01-25 15:55:45 +00:00
|
|
|
}
|