/*
* Copyright 2023 Conor McCarthy for CodeWeavers
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
*/
#include "vkd3d_shader_private.h"
bool vsir_program_init(struct vsir_program *program, const struct vkd3d_shader_version *version, unsigned int reserve)
{
    program->shader_version = *version;
    return shader_instruction_array_init(&program->instructions, reserve);
}
void vsir_program_cleanup(struct vsir_program *program)
{
size_t i;
for (i = 0; i < program->block_name_count; ++i)
vkd3d_free((void *)program->block_names[i]);
vkd3d_free(program->block_names);
shader_instruction_array_destroy(&program->instructions);
}
static inline bool shader_register_is_phase_instance_id(const struct vkd3d_shader_register *reg)
{
return reg->type == VKD3DSPR_FORKINSTID || reg->type == VKD3DSPR_JOININSTID;
}
static bool vsir_instruction_is_dcl(const struct vkd3d_shader_instruction *instruction)
{
enum vkd3d_shader_opcode handler_idx = instruction->handler_idx;
return (VKD3DSIH_DCL <= handler_idx && handler_idx <= VKD3DSIH_DCL_VERTICES_OUT)
|| handler_idx == VKD3DSIH_HS_DECLS;
}
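
/* Turn an instruction into a NOP, preserving its source location for any
 * later diagnostics. */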
static void vkd3d_shader_instruction_make_nop(struct vkd3d_shader_instruction *ins)
{
struct vkd3d_shader_location location = ins->location;
vsir_instruction_init(ins, &location, VKD3DSIH_NOP);
}
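
/* dcl_temps declarations carry no information beyond the temp count already
 * tracked in struct vsir_program, so they can simply be replaced with NOPs. */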
static void remove_dcl_temps(struct vsir_program *program)
{
unsigned int i;
for (i = 0; i < program->instructions.count; ++i)
{
struct vkd3d_shader_instruction *ins = &program->instructions.elements[i];
if (ins->handler_idx == VKD3DSIH_DCL_TEMPS)
vkd3d_shader_instruction_make_nop(ins);
}
}
static bool vsir_instruction_init_with_params(struct vkd3d_shader_parser *parser,
struct vkd3d_shader_instruction *ins, const struct vkd3d_shader_location *location,
enum vkd3d_shader_opcode handler_idx, unsigned int dst_count, unsigned int src_count)
{
vsir_instruction_init(ins, location, handler_idx);
ins->dst_count = dst_count;
ins->src_count = src_count;
if (!(ins->dst = shader_parser_get_dst_params(parser, ins->dst_count)))
{
ERR("Failed to allocate %u destination parameters.\n", dst_count);
return false;
}
if (!(ins->src = shader_parser_get_src_params(parser, ins->src_count)))
{
ERR("Failed to allocate %u source parameters.\n", src_count);
return false;
}
memset(ins->dst, 0, sizeof(*ins->dst) * ins->dst_count);
memset(ins->src, 0, sizeof(*ins->src) * ins->src_count);
return true;
}
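
/* Lower TEXKILL to a signed comparison against zero, an OR reduction of the
 * component results, and a conditional discard. SM1 kills on any of xyz;
 * SM2+ also tests w. For example, "texkill r0" on SM2+ becomes roughly:
 *
 *     lto        tmp, r0, (0.0, 0.0, 0.0, 0.0)
 *     or         tmp.x, tmp.x, tmp.y
 *     or         tmp.x, tmp.x, tmp.z
 *     or         tmp.x, tmp.x, tmp.w
 *     discard_nz tmp.x
 */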
static enum vkd3d_result instruction_array_lower_texkills(struct vkd3d_shader_parser *parser)
{
struct vsir_program *program = &parser->program;
struct vkd3d_shader_instruction_array *instructions = &program->instructions;
struct vkd3d_shader_instruction *texkill_ins, *ins;
unsigned int components_read = 3 + (program->shader_version.major >= 2);
unsigned int tmp_idx = ~0u;
unsigned int i, k;
for (i = 0; i < instructions->count; ++i)
{
texkill_ins = &instructions->elements[i];
if (texkill_ins->handler_idx != VKD3DSIH_TEXKILL)
continue;
if (!shader_instruction_array_insert_at(instructions, i + 1, components_read + 1))
return VKD3D_ERROR_OUT_OF_MEMORY;
if (tmp_idx == ~0u)
tmp_idx = program->temp_count++;
/* tmp = ins->dst[0] < 0 */
ins = &instructions->elements[i + 1];
if (!vsir_instruction_init_with_params(parser, ins, &texkill_ins->location, VKD3DSIH_LTO, 1, 2))
return VKD3D_ERROR_OUT_OF_MEMORY;
vsir_register_init(&ins->dst[0].reg, VKD3DSPR_TEMP, VKD3D_DATA_UINT, 1);
ins->dst[0].reg.dimension = VSIR_DIMENSION_VEC4;
ins->dst[0].reg.idx[0].offset = tmp_idx;
ins->dst[0].write_mask = VKD3DSP_WRITEMASK_ALL;
ins->src[0].reg = texkill_ins->dst[0].reg;
vsir_register_init(&ins->src[1].reg, VKD3DSPR_IMMCONST, VKD3D_DATA_FLOAT, 0);
ins->src[1].reg.dimension = VSIR_DIMENSION_VEC4;
ins->src[1].reg.u.immconst_f32[0] = 0.0f;
ins->src[1].reg.u.immconst_f32[1] = 0.0f;
ins->src[1].reg.u.immconst_f32[2] = 0.0f;
ins->src[1].reg.u.immconst_f32[3] = 0.0f;
/* tmp.x = tmp.x || tmp.y */
/* tmp.x = tmp.x || tmp.z */
/* tmp.x = tmp.x || tmp.w, if sm >= 2.0 */
for (k = 1; k < components_read; ++k)
{
ins = &instructions->elements[i + 1 + k];
if (!(vsir_instruction_init_with_params(parser, ins, &texkill_ins->location, VKD3DSIH_OR, 1, 2)))
return VKD3D_ERROR_OUT_OF_MEMORY;
vsir_register_init(&ins->dst[0].reg, VKD3DSPR_TEMP, VKD3D_DATA_UINT, 1);
ins->dst[0].reg.dimension = VSIR_DIMENSION_VEC4;
ins->dst[0].reg.idx[0].offset = tmp_idx;
ins->dst[0].write_mask = VKD3DSP_WRITEMASK_0;
vsir_register_init(&ins->src[0].reg, VKD3DSPR_TEMP, VKD3D_DATA_UINT, 1);
ins->src[0].reg.dimension = VSIR_DIMENSION_VEC4;
ins->src[0].reg.idx[0].offset = tmp_idx;
ins->src[0].swizzle = VKD3D_SHADER_SWIZZLE(X, X, X, X);
vsir_register_init(&ins->src[1].reg, VKD3DSPR_TEMP, VKD3D_DATA_UINT, 1);
ins->src[1].reg.dimension = VSIR_DIMENSION_VEC4;
ins->src[1].reg.idx[0].offset = tmp_idx;
ins->src[1].swizzle = vkd3d_shader_create_swizzle(k, k, k, k);
}
/* discard_nz tmp.x */
ins = &instructions->elements[i + 1 + components_read];
if (!(vsir_instruction_init_with_params(parser, ins, &texkill_ins->location, VKD3DSIH_DISCARD, 0, 1)))
return VKD3D_ERROR_OUT_OF_MEMORY;
ins->flags = VKD3D_SHADER_CONDITIONAL_OP_NZ;
vsir_register_init(&ins->src[0].reg, VKD3DSPR_TEMP, VKD3D_DATA_UINT, 1);
ins->src[0].reg.dimension = VSIR_DIMENSION_VEC4;
ins->src[0].reg.idx[0].offset = tmp_idx;
ins->src[0].swizzle = VKD3D_SHADER_SWIZZLE(X, X, X, X);
/* Make the original instruction a no-op. */
vkd3d_shader_instruction_make_nop(texkill_ins);
}
return VKD3D_OK;
}
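
/* Rewrite any register index that is relative-addressed by a fork/join
 * instance id register to use a direct offset instead. */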
static void shader_register_eliminate_phase_addressing(struct vkd3d_shader_register *reg,
        unsigned int instance_id)
{
    unsigned int i;

    for (i = 0; i < reg->idx_count; ++i)
    {
        if (reg->idx[i].rel_addr && shader_register_is_phase_instance_id(&reg->idx[i].rel_addr->reg))
        {
            reg->idx[i].rel_addr = NULL;
            reg->idx[i].offset += instance_id;
        }
    }
}
static void shader_instruction_eliminate_phase_instance_id(struct vkd3d_shader_instruction *ins,
        unsigned int instance_id)
{
    struct vkd3d_shader_register *reg;
    unsigned int i;

    for (i = 0; i < ins->src_count; ++i)
    {
        reg = (struct vkd3d_shader_register *)&ins->src[i].reg;
        if (shader_register_is_phase_instance_id(reg))
        {
            vsir_register_init(reg, VKD3DSPR_IMMCONST, reg->data_type, 0);
            reg->u.immconst_u32[0] = instance_id;
            continue;
        }
        shader_register_eliminate_phase_addressing(reg, instance_id);
    }

    for (i = 0; i < ins->dst_count; ++i)
        shader_register_eliminate_phase_addressing((struct vkd3d_shader_register *)&ins->dst[i].reg, instance_id);
}
static const struct vkd3d_shader_varying_map *find_varying_map(
        const struct vkd3d_shader_varying_map_info *varying_map, unsigned int signature_idx)
{
    unsigned int i;

    for (i = 0; i < varying_map->varying_count; ++i)
    {
        if (varying_map->varying_map[i].output_signature_index == signature_idx)
            return &varying_map->varying_map[i];
    }

    return NULL;
}
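
/* Assign each output element the target location consumed by the next stage,
 * and mark outputs the next stage does not read as unused. */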
static enum vkd3d_result remap_output_signature(struct vkd3d_shader_parser *parser,
        const struct vkd3d_shader_compile_info *compile_info)
{
    struct shader_signature *signature = &parser->shader_desc.output_signature;
    const struct vkd3d_shader_varying_map_info *varying_map;
    unsigned int i;

    if (!(varying_map = vkd3d_find_struct(compile_info->next, VARYING_MAP_INFO)))
        return VKD3D_OK;

    for (i = 0; i < signature->element_count; ++i)
    {
        const struct vkd3d_shader_varying_map *map = find_varying_map(varying_map, i);
        struct signature_element *e = &signature->elements[i];

        if (map)
        {
            unsigned int input_mask = map->input_mask;

            e->target_location = map->input_register_index;

            /* It is illegal in Vulkan if the next shader uses the same varying
             * location with a different mask. */
            if (input_mask && input_mask != e->mask)
            {
                vkd3d_shader_parser_error(parser, VKD3D_SHADER_ERROR_VSIR_NOT_IMPLEMENTED,
                        "Aborting due to not yet implemented feature: "
                        "Output mask %#x does not match input mask %#x.",
                        e->mask, input_mask);
                return VKD3D_ERROR_NOT_IMPLEMENTED;
            }
        }
        else
        {
            e->target_location = SIGNATURE_TARGET_LOCATION_UNUSED;
        }
    }

    for (i = 0; i < varying_map->varying_count; ++i)
    {
        if (varying_map->varying_map[i].output_signature_index >= signature->element_count)
        {
            vkd3d_shader_parser_error(parser, VKD3D_SHADER_ERROR_VSIR_NOT_IMPLEMENTED,
                    "Aborting due to not yet implemented feature: "
                    "The next stage consumes varyings not written by this stage.");
            return VKD3D_ERROR_NOT_IMPLEMENTED;
        }
    }

    return VKD3D_OK;
}
struct hull_flattener
{
    struct vkd3d_shader_instruction_array instructions;

    unsigned int instance_count;
    unsigned int phase_body_idx;
    enum vkd3d_shader_opcode phase;
    struct vkd3d_shader_location last_ret_location;
};

static bool flattener_is_in_fork_or_join_phase(const struct hull_flattener *flattener)
{
    return flattener->phase == VKD3DSIH_HS_FORK_PHASE || flattener->phase == VKD3DSIH_HS_JOIN_PHASE;
}
struct shader_phase_location
{
unsigned int index;
unsigned int instance_count;
unsigned int instruction_count;
};
struct shader_phase_location_array
{
/* Unlikely worst case: one phase for each component of each output register. */
struct shader_phase_location locations[MAX_REG_OUTPUT * VKD3D_VEC4_SIZE];
unsigned int count;
};
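
/* NOP out phase-related declarations, and record where each fork/join phase
 * body begins, its length, and its instance count, so the bodies can later be
 * duplicated once per instance. */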
static void flattener_eliminate_phase_related_dcls(struct hull_flattener *normaliser,
        unsigned int index, struct shader_phase_location_array *locations)
{
    struct vkd3d_shader_instruction *ins = &normaliser->instructions.elements[index];
    struct shader_phase_location *loc;
    bool b;

    if (ins->handler_idx == VKD3DSIH_HS_FORK_PHASE || ins->handler_idx == VKD3DSIH_HS_JOIN_PHASE)
    {
        b = flattener_is_in_fork_or_join_phase(normaliser);
        /* Reset the phase info. */
        normaliser->phase_body_idx = ~0u;
        normaliser->phase = ins->handler_idx;
        normaliser->instance_count = 1;
        /* Leave the first occurrence and delete the rest. */
        if (b)
            vkd3d_shader_instruction_make_nop(ins);
        return;
    }
    else if (ins->handler_idx == VKD3DSIH_DCL_HS_FORK_PHASE_INSTANCE_COUNT
            || ins->handler_idx == VKD3DSIH_DCL_HS_JOIN_PHASE_INSTANCE_COUNT)
    {
        normaliser->instance_count = ins->declaration.count + !ins->declaration.count;
        vkd3d_shader_instruction_make_nop(ins);
        return;
    }
    else if (ins->handler_idx == VKD3DSIH_DCL_INPUT && shader_register_is_phase_instance_id(
            &ins->declaration.dst.reg))
    {
        vkd3d_shader_instruction_make_nop(ins);
        return;
    }

    if (normaliser->phase == VKD3DSIH_INVALID || vsir_instruction_is_dcl(ins))
        return;

    if (normaliser->phase_body_idx == ~0u)
        normaliser->phase_body_idx = index;

    if (ins->handler_idx == VKD3DSIH_RET)
    {
        normaliser->last_ret_location = ins->location;
        vkd3d_shader_instruction_make_nop(ins);
        if (locations->count >= ARRAY_SIZE(locations->locations))
        {
            FIXME("Insufficient space for phase location.\n");
            return;
        }
        loc = &locations->locations[locations->count++];
        loc->index = normaliser->phase_body_idx;
        loc->instance_count = normaliser->instance_count;
        loc->instruction_count = index - normaliser->phase_body_idx;
    }
}
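
/* Expand the instruction array in place, from the last phase to the first,
 * then clone each fork/join phase body once per instance and substitute a
 * constant instance id into each copy. */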
static enum vkd3d_result flattener_flatten_phases(struct hull_flattener *normaliser,
        struct shader_phase_location_array *locations)
{
    struct shader_phase_location *loc;
    unsigned int i, j, k, end, count;

    for (i = 0, count = 0; i < locations->count; ++i)
        count += (locations->locations[i].instance_count - 1) * locations->locations[i].instruction_count;

    if (!shader_instruction_array_reserve(&normaliser->instructions, normaliser->instructions.count + count))
        return VKD3D_ERROR_OUT_OF_MEMORY;
    end = normaliser->instructions.count;
    normaliser->instructions.count += count;

    for (i = locations->count; i > 0; --i)
    {
        loc = &locations->locations[i - 1];
        j = loc->index + loc->instruction_count;
        memmove(&normaliser->instructions.elements[j + count], &normaliser->instructions.elements[j],
                (end - j) * sizeof(*normaliser->instructions.elements));
        end = j;
        count -= (loc->instance_count - 1) * loc->instruction_count;
        loc->index += count;
    }

    for (i = 0, count = 0; i < locations->count; ++i)
    {
        loc = &locations->locations[i];
        /* Make a copy of the non-dcl instructions for each instance. */
        for (j = 1; j < loc->instance_count; ++j)
        {
            for (k = 0; k < loc->instruction_count; ++k)
            {
                if (!shader_instruction_array_clone_instruction(&normaliser->instructions,
                        loc->index + loc->instruction_count * j + k, loc->index + k))
                    return VKD3D_ERROR_OUT_OF_MEMORY;
            }
        }
        /* Replace each reference to the instance id with a constant instance id. */
        for (j = 0; j < loc->instance_count; ++j)
        {
            for (k = 0; k < loc->instruction_count; ++k)
                shader_instruction_eliminate_phase_instance_id(
                        &normaliser->instructions.elements[loc->index + loc->instruction_count * j + k], j);
        }
    }

    return VKD3D_OK;
}
void vsir_register_init(struct vkd3d_shader_register *reg, enum vkd3d_shader_register_type reg_type,
        enum vkd3d_data_type data_type, unsigned int idx_count)
{
    reg->type = reg_type;
    reg->precision = VKD3D_SHADER_REGISTER_PRECISION_DEFAULT;
    reg->non_uniform = false;
    reg->data_type = data_type;
    reg->idx[0].offset = ~0u;
    reg->idx[0].rel_addr = NULL;
    reg->idx[0].is_in_bounds = false;
    reg->idx[1].offset = ~0u;
    reg->idx[1].rel_addr = NULL;
    reg->idx[1].is_in_bounds = false;
    reg->idx[2].offset = ~0u;
    reg->idx[2].rel_addr = NULL;
    reg->idx[2].is_in_bounds = false;
    reg->idx_count = idx_count;
    reg->dimension = VSIR_DIMENSION_SCALAR;
    reg->alignment = 0;
}
void vsir_src_param_init(struct vkd3d_shader_src_param *param, enum vkd3d_shader_register_type reg_type,
        enum vkd3d_data_type data_type, unsigned int idx_count)
{
    vsir_register_init(&param->reg, reg_type, data_type, idx_count);
    param->swizzle = 0;
    param->modifiers = VKD3DSPSM_NONE;
}
void vsir_src_param_init_label(struct vkd3d_shader_src_param *param, unsigned int label_id)
{
vsir_src_param_init(param, VKD3DSPR_LABEL, VKD3D_DATA_UINT, 1);
param->reg.dimension = VSIR_DIMENSION_NONE;
param->reg.idx[0].offset = label_id;
}
void vsir_instruction_init(struct vkd3d_shader_instruction *ins, const struct vkd3d_shader_location *location,
enum vkd3d_shader_opcode handler_idx)
{
memset(ins, 0, sizeof(*ins));
ins->location = *location;
ins->handler_idx = handler_idx;
}
static bool vsir_instruction_init_label(struct vkd3d_shader_instruction *ins,
        const struct vkd3d_shader_location *location, unsigned int label_id, struct vkd3d_shader_parser *parser)
{
struct vkd3d_shader_src_param *src_param;
if (!(src_param = shader_parser_get_src_params(parser, 1)))
return false;
vsir_src_param_init_label(src_param, label_id);
vsir_instruction_init(ins, location, VKD3DSIH_LABEL);
ins->src = src_param;
ins->src_count = 1;
return true;
}
static enum vkd3d_result instruction_array_flatten_hull_shader_phases(struct vkd3d_shader_instruction_array *src_instructions)
{
    struct hull_flattener flattener = {*src_instructions};
    struct vkd3d_shader_instruction_array *instructions;
    struct shader_phase_location_array locations;
    enum vkd3d_result result = VKD3D_OK;
    unsigned int i;

    instructions = &flattener.instructions;

    flattener.phase = VKD3DSIH_INVALID;
    for (i = 0, locations.count = 0; i < instructions->count; ++i)
        flattener_eliminate_phase_related_dcls(&flattener, i, &locations);

    if ((result = flattener_flatten_phases(&flattener, &locations)) < 0)
        return result;

    if (flattener.phase != VKD3DSIH_INVALID)
    {
        if (!shader_instruction_array_reserve(&flattener.instructions, flattener.instructions.count + 1))
            return VKD3D_ERROR_OUT_OF_MEMORY;
        vsir_instruction_init(&instructions->elements[instructions->count++], &flattener.last_ret_location, VKD3DSIH_RET);
    }

    *src_instructions = flattener.instructions;
    return result;
}
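
/* In the control point phase, output registers are implicitly indexed by the
 * output control point id; the normaliser below inserts that index
 * explicitly. */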
struct control_point_normaliser
{
struct vkd3d_shader_instruction_array instructions;
enum vkd3d_shader_opcode phase;
struct vkd3d_shader_src_param *outpointid_param;
};
static bool control_point_normaliser_is_in_control_point_phase(const struct control_point_normaliser *normaliser)
{
return normaliser->phase == VKD3DSIH_HS_CONTROL_POINT_PHASE;
}
static struct vkd3d_shader_src_param *instruction_array_create_outpointid_param(
        struct vkd3d_shader_instruction_array *instructions)
{
    struct vkd3d_shader_src_param *rel_addr;

    if (!(rel_addr = shader_src_param_allocator_get(&instructions->src_params, 1)))
        return NULL;

    vsir_register_init(&rel_addr->reg, VKD3DSPR_OUTPOINTID, VKD3D_DATA_UINT, 0);
    rel_addr->swizzle = 0;
    rel_addr->modifiers = 0;

    return rel_addr;
}
static void shader_dst_param_normalise_outpointid(struct vkd3d_shader_dst_param *dst_param,
        struct control_point_normaliser *normaliser)
{
    struct vkd3d_shader_register *reg = &dst_param->reg;

    if (control_point_normaliser_is_in_control_point_phase(normaliser) && reg->type == VKD3DSPR_OUTPUT)
    {
        /* The TPF reader validates idx_count. */
        assert(reg->idx_count == 1);
        reg->idx[1] = reg->idx[0];
        /* The control point id param is implicit here. Avoid later complications by inserting it. */
        reg->idx[0].offset = 0;
        reg->idx[0].rel_addr = normaliser->outpointid_param;
        ++reg->idx_count;
    }
}
static void shader_dst_param_io_init(struct vkd3d_shader_dst_param *param, const struct signature_element *e,
        enum vkd3d_shader_register_type reg_type, unsigned int idx_count)
{
    param->write_mask = e->mask;
    param->modifiers = 0;
    param->shift = 0;
    vsir_register_init(&param->reg, reg_type, vkd3d_data_type_from_component_type(e->component_type), idx_count);
}
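
/* Synthesise a control point phase, inserted before the first fork or join
 * phase, which declares each input signature element the shader uses. */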
static enum vkd3d_result control_point_normaliser_emit_hs_input(struct control_point_normaliser *normaliser,
        const struct shader_signature *s, unsigned int input_control_point_count, unsigned int dst,
        const struct vkd3d_shader_location *location)
{
    struct vkd3d_shader_instruction *ins;
    struct vkd3d_shader_dst_param *param;
    const struct signature_element *e;
    unsigned int i, count;

    for (i = 0, count = 1; i < s->element_count; ++i)
        count += !!s->elements[i].used_mask;

    if (!shader_instruction_array_reserve(&normaliser->instructions, normaliser->instructions.count + count))
        return VKD3D_ERROR_OUT_OF_MEMORY;

    memmove(&normaliser->instructions.elements[dst + count], &normaliser->instructions.elements[dst],
            (normaliser->instructions.count - dst) * sizeof(*normaliser->instructions.elements));
    normaliser->instructions.count += count;

    ins = &normaliser->instructions.elements[dst];
    vsir_instruction_init(ins, location, VKD3DSIH_HS_CONTROL_POINT_PHASE);
    ins->flags = 1;
    ++ins;

    for (i = 0; i < s->element_count; ++i)
    {
        e = &s->elements[i];
        if (!e->used_mask)
            continue;

        if (e->sysval_semantic != VKD3D_SHADER_SV_NONE)
        {
            vsir_instruction_init(ins, location, VKD3DSIH_DCL_INPUT_SIV);
            param = &ins->declaration.register_semantic.reg;
            ins->declaration.register_semantic.sysval_semantic = vkd3d_siv_from_sysval(e->sysval_semantic);
        }
        else
        {
            vsir_instruction_init(ins, location, VKD3DSIH_DCL_INPUT);
            param = &ins->declaration.dst;
        }

        shader_dst_param_io_init(param, e, VKD3DSPR_INPUT, 2);
        param->reg.idx[0].offset = input_control_point_count;
        param->reg.idx[1].offset = e->register_index;
        param->write_mask = e->mask;

        ++ins;
    }

    return VKD3D_OK;
}
static enum vkd3d_result instruction_array_normalise_hull_shader_control_point_io(
        struct vkd3d_shader_instruction_array *src_instructions, const struct shader_signature *input_signature)
{
    struct vkd3d_shader_instruction_array *instructions;
    struct control_point_normaliser normaliser;
    unsigned int input_control_point_count;
    struct vkd3d_shader_location location;
    struct vkd3d_shader_instruction *ins;
    enum vkd3d_result ret;
    unsigned int i, j;

    if (!(normaliser.outpointid_param = instruction_array_create_outpointid_param(src_instructions)))
    {
        ERR("Failed to allocate src param.\n");
        return VKD3D_ERROR_OUT_OF_MEMORY;
    }
    normaliser.instructions = *src_instructions;
    instructions = &normaliser.instructions;
    normaliser.phase = VKD3DSIH_INVALID;

    for (i = 0; i < normaliser.instructions.count; ++i)
    {
        ins = &instructions->elements[i];

        switch (ins->handler_idx)
        {
            case VKD3DSIH_HS_CONTROL_POINT_PHASE:
            case VKD3DSIH_HS_FORK_PHASE:
            case VKD3DSIH_HS_JOIN_PHASE:
                normaliser.phase = ins->handler_idx;
                break;
            default:
                if (vsir_instruction_is_dcl(ins))
                    break;
                for (j = 0; j < ins->dst_count; ++j)
                    shader_dst_param_normalise_outpointid(&ins->dst[j], &normaliser);
                break;
        }
    }

    normaliser.phase = VKD3DSIH_INVALID;
    input_control_point_count = 1;

    for (i = 0; i < instructions->count; ++i)
    {
        ins = &instructions->elements[i];

        switch (ins->handler_idx)
        {
            case VKD3DSIH_DCL_INPUT_CONTROL_POINT_COUNT:
                input_control_point_count = ins->declaration.count;
                break;
            case VKD3DSIH_HS_CONTROL_POINT_PHASE:
                *src_instructions = normaliser.instructions;
                return VKD3D_OK;
            case VKD3DSIH_HS_FORK_PHASE:
            case VKD3DSIH_HS_JOIN_PHASE:
                /* ins may be relocated if the instruction array expands. */
                location = ins->location;
                ret = control_point_normaliser_emit_hs_input(&normaliser, input_signature,
                        input_control_point_count, i, &location);
                *src_instructions = normaliser.instructions;
                return ret;
            default:
                break;
        }
    }

    *src_instructions = normaliser.instructions;
    return VKD3D_OK;
}
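
/* State for I/O register normalisation: the signatures to merge, the index
 * ranges registered for each register file, and the first declaration seen
 * for each I/O register. */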
struct io_normaliser
{
    struct vkd3d_shader_instruction_array instructions;
    enum vkd3d_shader_type shader_type;
    uint8_t major;
    struct shader_signature *input_signature;
    struct shader_signature *output_signature;
    struct shader_signature *patch_constant_signature;

    unsigned int instance_count;
    unsigned int phase_body_idx;
    enum vkd3d_shader_opcode phase;
    unsigned int output_control_point_count;

    struct vkd3d_shader_src_param *outpointid_param;

    struct vkd3d_shader_dst_param *input_dcl_params[MAX_REG_OUTPUT];
    struct vkd3d_shader_dst_param *output_dcl_params[MAX_REG_OUTPUT];
    struct vkd3d_shader_dst_param *pc_dcl_params[MAX_REG_OUTPUT];
    uint8_t input_range_map[MAX_REG_OUTPUT][VKD3D_VEC4_SIZE];
    uint8_t output_range_map[MAX_REG_OUTPUT][VKD3D_VEC4_SIZE];
    uint8_t pc_range_map[MAX_REG_OUTPUT][VKD3D_VEC4_SIZE];

    bool use_vocp;
};
static bool io_normaliser_is_in_fork_or_join_phase(const struct io_normaliser *normaliser)
{
return normaliser->phase == VKD3DSIH_HS_FORK_PHASE || normaliser->phase == VKD3DSIH_HS_JOIN_PHASE;
}
static bool io_normaliser_is_in_control_point_phase(const struct io_normaliser *normaliser)
{
return normaliser->phase == VKD3DSIH_HS_CONTROL_POINT_PHASE;
}
static unsigned int shader_signature_find_element_for_reg(const struct shader_signature *signature,
        unsigned int reg_idx, unsigned int write_mask)
{
    unsigned int i, base_write_mask;

    for (i = 0; i < signature->element_count; ++i)
    {
        struct signature_element *e = &signature->elements[i];
        if (e->register_index <= reg_idx && e->register_index + e->register_count > reg_idx
                && (e->mask & write_mask) == write_mask)
        {
            return i;
        }
    }

    /* Validated in the TPF reader, but failure in signature_element_range_expand_mask()
     * can land us here on an unmatched vector mask. */
    FIXME("Failed to find signature element for register index %u, mask %#x; using scalar mask.\n",
            reg_idx, write_mask);
    base_write_mask = 1u << vsir_write_mask_get_component_idx(write_mask);
    if (base_write_mask != write_mask)
        return shader_signature_find_element_for_reg(signature, reg_idx, base_write_mask);

    vkd3d_unreachable();
}
struct signature_element *vsir_signature_find_element_for_reg(const struct shader_signature *signature,
unsigned int reg_idx, unsigned int write_mask)
{
return &signature->elements[shader_signature_find_element_for_reg(signature, reg_idx, write_mask)];
}
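
/* A range map records, at the first register and component of a range, the
 * number of registers the range spans; every other register/component cell
 * the range occupies is set to UINT8_MAX. */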
static unsigned int range_map_get_register_count(uint8_t range_map[][VKD3D_VEC4_SIZE],
        unsigned int register_idx, uint32_t write_mask)
{
    return range_map[register_idx][vsir_write_mask_get_component_idx(write_mask)];
}
static void range_map_set_register_range(uint8_t range_map[][VKD3D_VEC4_SIZE], unsigned int register_idx,
        unsigned int register_count, uint32_t write_mask, bool is_dcl_indexrange)
{
    unsigned int i, j, r, c, component_idx, component_count;

    assert(write_mask <= VKD3DSP_WRITEMASK_ALL);
    component_idx = vsir_write_mask_get_component_idx(write_mask);
    component_count = vsir_write_mask_component_count(write_mask);

    assert(register_idx < MAX_REG_OUTPUT && MAX_REG_OUTPUT - register_idx >= register_count);

    if (range_map[register_idx][component_idx] > register_count && is_dcl_indexrange)
    {
        /* Validated in the TPF reader. */
        assert(range_map[register_idx][component_idx] != UINT8_MAX);
        return;
    }
    if (range_map[register_idx][component_idx] == register_count)
    {
        /* Already done. This happens when fxc splits a register declaration by
         * component(s). The dcl_indexrange instructions are split too. */
        return;
    }
    range_map[register_idx][component_idx] = register_count;

    for (i = 0; i < register_count; ++i)
    {
        r = register_idx + i;
        for (j = !i; j < component_count; ++j)
        {
            c = component_idx + j;
            /* A synthetic patch constant range which overlaps an existing range can start upstream of it
             * for fork/join phase instancing, but ranges declared by dcl_indexrange should not overlap.
             * The latter is validated in the TPF reader. */
            assert(!range_map[r][c] || !is_dcl_indexrange);
            range_map[r][c] = UINT8_MAX;
        }
    }
}
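
/* Register a dcl_indexrange declaration in the range map for the register
 * file it addresses. */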
static void io_normaliser_add_index_range(struct io_normaliser *normaliser,
const struct vkd3d_shader_instruction *ins)
{
const struct vkd3d_shader_index_range *range = &ins->declaration.index_range;
const struct vkd3d_shader_register *reg = &range->dst.reg;
unsigned int reg_idx, write_mask, element_idx;
const struct shader_signature *signature;
uint8_t (*range_map)[VKD3D_VEC4_SIZE];
switch (reg->type)
{
case VKD3DSPR_INPUT:
case VKD3DSPR_INCONTROLPOINT:
range_map = normaliser->input_range_map;
signature = normaliser->input_signature;
break;
case VKD3DSPR_OUTCONTROLPOINT:
range_map = normaliser->output_range_map;
signature = normaliser->output_signature;
break;
case VKD3DSPR_OUTPUT:
if (!io_normaliser_is_in_fork_or_join_phase(normaliser))
{
range_map = normaliser->output_range_map;
signature = normaliser->output_signature;
break;
}
/* fall through */
case VKD3DSPR_PATCHCONST:
range_map = normaliser->pc_range_map;
signature = normaliser->patch_constant_signature;
break;
default:
/* Validated in the TPF reader. */
vkd3d_unreachable();
}
reg_idx = reg->idx[reg->idx_count - 1].offset;
write_mask = range->dst.write_mask;
element_idx = shader_signature_find_element_for_reg(signature, reg_idx, write_mask);
range_map_set_register_range(range_map, reg_idx, range->register_count,
signature->elements[element_idx].mask, true);
}
static int signature_element_mask_compare(const void *a, const void *b)
{
const struct signature_element *e = a, *f = b;
int ret;
return (ret = vkd3d_u32_compare(e->mask, f->mask)) ? ret : vkd3d_u32_compare(e->register_index, f->register_index);
}
static bool sysval_semantics_should_merge(const struct signature_element *e, const struct signature_element *f)
{
if (e->sysval_semantic < VKD3D_SHADER_SV_TESS_FACTOR_QUADEDGE
|| e->sysval_semantic > VKD3D_SHADER_SV_TESS_FACTOR_LINEDEN)
return false;
return e->sysval_semantic == f->sysval_semantic
/* Line detail and density must be merged together to match the SPIR-V array.
* This deletes one of the two sysvals, but these are not used. */
|| (e->sysval_semantic == VKD3D_SHADER_SV_TESS_FACTOR_LINEDET
&& f->sysval_semantic == VKD3D_SHADER_SV_TESS_FACTOR_LINEDEN)
|| (e->sysval_semantic == VKD3D_SHADER_SV_TESS_FACTOR_LINEDEN
&& f->sysval_semantic == VKD3D_SHADER_SV_TESS_FACTOR_LINEDET);
}
/* Merge tess factor sysvals because they are an array in SPIR-V. */
static void shader_signature_map_patch_constant_index_ranges(struct shader_signature *s,
uint8_t range_map[][VKD3D_VEC4_SIZE])
{
struct signature_element *e, *f;
unsigned int i, j, register_count;
qsort(s->elements, s->element_count, sizeof(s->elements[0]), signature_element_mask_compare);
for (i = 0; i < s->element_count; i += register_count)
{
e = &s->elements[i];
register_count = 1;
if (!e->sysval_semantic)
continue;
for (j = i + 1; j < s->element_count; ++j, ++register_count)
{
f = &s->elements[j];
if (f->register_index != e->register_index + register_count || !sysval_semantics_should_merge(e, f))
break;
}
if (register_count < 2)
continue;
range_map_set_register_range(range_map, e->register_index, register_count, e->mask, false);
}
}
static int signature_element_register_compare(const void *a, const void *b)
{
const struct signature_element *e = a, *f = b;
return vkd3d_u32_compare(e->register_index, f->register_index);
}
static int signature_element_index_compare(const void *a, const void *b)
{
const struct signature_element *e = a, *f = b;
return vkd3d_u32_compare(e->sort_index, f->sort_index);
}
static unsigned int signature_element_range_expand_mask(struct signature_element *e, unsigned int register_count,
uint8_t range_map[][VKD3D_VEC4_SIZE])
{
unsigned int i, j, component_idx, component_count, merged_write_mask = e->mask;
/* dcl_indexrange instructions can declare a subset of the full mask, and the masks of
* the elements within the range may differ. TPF's handling of arrayed inputs with
* dcl_indexrange is really just a hack. Here we create a mask which covers all element
* masks, and check for collisions with other ranges. */
for (i = 1; i < register_count; ++i)
merged_write_mask |= e[i].mask;
if (merged_write_mask == e->mask)
return merged_write_mask;
/* Reaching this point is very rare to begin with, and collisions are even rarer or
* impossible. If the latter shows up, the fallback in shader_signature_find_element_for_reg()
* may be sufficient. */
component_idx = vsir_write_mask_get_component_idx(e->mask);
component_count = vsir_write_mask_component_count(e->mask);
for (i = e->register_index; i < e->register_index + register_count; ++i)
{
for (j = 0; j < component_idx; ++j)
if (range_map[i][j])
break;
for (j = component_idx + component_count; j < VKD3D_VEC4_SIZE; ++j)
if (range_map[i][j])
break;
}
if (i == e->register_index + register_count)
{
WARN("Expanding mask %#x to %#x for %s, base reg %u, count %u.\n", e->mask, merged_write_mask,
e->semantic_name, e->register_index, register_count);
return merged_write_mask;
}
WARN("Cannot expand mask %#x to %#x for %s, base reg %u, count %u.\n", e->mask, merged_write_mask,
e->semantic_name, e->register_index, register_count);
return e->mask;
}
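
/* Merge together elements which share a register index, so that each
 * register maps to a single signature element. */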
static bool shader_signature_merge(struct shader_signature *s, uint8_t range_map[][VKD3D_VEC4_SIZE],
        bool is_patch_constant)
{
    unsigned int i, j, element_count, new_count, register_count;
    struct signature_element *elements;
    struct signature_element *e, *f;
    bool used;

    element_count = s->element_count;
    if (!(elements = vkd3d_malloc(element_count * sizeof(*elements))))
        return false;
    memcpy(elements, s->elements, element_count * sizeof(*elements));

    qsort(elements, element_count, sizeof(elements[0]), signature_element_register_compare);

    for (i = 0, new_count = 0; i < element_count; i = j, elements[new_count++] = *e)
    {
        e = &elements[i];
        j = i + 1;

        if (e->register_index == ~0u)
            continue;

        /* Do not merge if the register index will be relative-addressed. */
        if (range_map_get_register_count(range_map, e->register_index, e->mask) > 1)
            continue;

        used = e->used_mask;

        for (; j < element_count; ++j)
        {
            f = &elements[j];

            /* Merge different components of the same register unless sysvals are different,
             * or it will be relative-addressed. */
            if (f->register_index != e->register_index || f->sysval_semantic != e->sysval_semantic
                    || range_map_get_register_count(range_map, f->register_index, f->mask) > 1)
                break;

            TRACE("Merging %s, reg %u, mask %#x, sysval %#x with %s, mask %#x, sysval %#x.\n", e->semantic_name,
                    e->register_index, e->mask, e->sysval_semantic, f->semantic_name, f->mask, f->sysval_semantic);
            assert(!(e->mask & f->mask));

            e->mask |= f->mask;
            e->used_mask |= f->used_mask;
            e->semantic_index = min(e->semantic_index, f->semantic_index);

            /* The first element may have no interpolation mode if it is unused. Elements which