Compare revisions

Changes are shown as if the source revision was being merged into the target revision.

Commits on Source (38)
Showing with 1744 additions and 202 deletions
......@@ -23,6 +23,7 @@ vkd3d-*.tar.xz
*.tab.c
*.tab.h
*.trs
*.txt
*.yy.c
*~
......
stages:
- image
- build
- test
include:
- local: "/gitlab/image.yml"
- local: "/gitlab/build.yml"
- local: "/gitlab/test.yml"
......@@ -166,9 +166,13 @@ vkd3d_shader_tests = \
tests/hlsl/struct-array.shader_test \
tests/hlsl/struct-assignment.shader_test \
tests/hlsl/struct-semantics.shader_test \
tests/hlsl/switch.shader_test \
tests/hlsl/swizzle-constant-prop.shader_test \
tests/hlsl/swizzle-matrix.shader_test \
tests/hlsl/swizzles.shader_test \
tests/hlsl/technique-fx_2.shader_test \
tests/hlsl/technique-fx_4.shader_test \
tests/hlsl/technique-fx_5.shader_test \
tests/hlsl/ternary.shader_test \
tests/hlsl/texture-load-offset.shader_test \
tests/hlsl/texture-load-typed.shader_test \
......@@ -480,6 +484,9 @@ shader_runner_cross_sources = \
$(srcdir)/tests/shader_runner_d3d11.c \
$(srcdir)/tests/shader_runner_d3d12.c
driver_cross_sources = \
$(srcdir)/tests/driver.c
if HAVE_CROSSTARGET32
CROSS32_CC = @CROSSCC32@
CROSS32_DLLTOOL = @CROSSTARGET32@-dlltool
......@@ -495,6 +502,7 @@ endif
CROSS32_FILES = $(CROSS32_EXEFILES)
if BUILD_TESTS
CROSS32_FILES += tests/shader_runner.cross32.exe
CROSS32_FILES += tests/driver.cross32.exe
endif
CLEANFILES += $(CROSS32_IMPLIBS) $(CROSS32_FILES)
......@@ -517,6 +525,11 @@ tests/shader_runner.cross32.exe: $(shader_runner_cross_sources) $(CROSS32_IMPLIB
$(CROSS32_CC) $(CROSS_CFLAGS) -MT $@ -MD -MP -MF $$depbase.Tpo -o $@ $(shader_runner_cross_sources) $(CROSS32_IMPLIBS) -ldxgi -lgdi32 -ld3dcompiler_47 && \
$(am__mv) $$depbase.Tpo $$depbase.Po
tests/driver.cross32.exe: $(driver_cross_sources)
$(AM_V_CCLD)depbase=`echo $@ | sed 's![^/]*$$!$(DEPDIR)/&!;s!\.exe$$!!'`; \
$(CROSS32_CC) $(CROSS_CFLAGS) -MT $@ -MD -MP -MF $$depbase.Tpo -o $@ $(driver_cross_sources) && \
$(am__mv) $$depbase.Tpo $$depbase.Po
else
crosstest32:
endif
......@@ -536,6 +549,7 @@ endif
CROSS64_FILES = $(CROSS64_EXEFILES)
if BUILD_TESTS
CROSS64_FILES += tests/shader_runner.cross64.exe
CROSS64_FILES += tests/driver.cross64.exe
endif
CLEANFILES += $(CROSS64_IMPLIBS) $(CROSS64_FILES)
......@@ -558,12 +572,28 @@ tests/shader_runner.cross64.exe: $(shader_runner_cross_sources) $(CROSS64_IMPLIB
$(CROSS64_CC) $(CROSS_CFLAGS) -MT $@ -MD -MP -MF $$depbase.Tpo -o $@ $(shader_runner_cross_sources) $(CROSS64_IMPLIBS) -ldxgi -lgdi32 -ld3dcompiler_47 && \
$(am__mv) $$depbase.Tpo $$depbase.Po
tests/driver.cross64.exe: $(driver_cross_sources)
$(AM_V_CCLD)depbase=`echo $@ | sed 's![^/]*$$!$(DEPDIR)/&!;s!\.exe$$!!'`; \
$(CROSS64_CC) $(CROSS_CFLAGS) -MT $@ -MD -MP -MF $$depbase.Tpo -o $@ $(driver_cross_sources) && \
$(am__mv) $$depbase.Tpo $$depbase.Po
else
crosstest64:
endif
.PHONY: crosstest crosstest32 crosstest64
crosstest: crosstest32 crosstest64
tests/crosstests.txt: FORCE
$(AM_V_GEN) for i in $(vkd3d_cross_tests) ; do echo $$i ; done > $@
tests/shader_tests.txt: FORCE
$(AM_V_GEN) for i in $(vkd3d_shader_tests) ; do echo $$i ; done > $@
crosstest-lists: tests/crosstests.txt tests/shader_tests.txt
CLEANFILES += tests/crosstests.txt tests/shader_tests.txt
FORCE:
.PHONY: crosstest crosstest32 crosstest64 crosstest-lists FORCE
crosstest: crosstest32 crosstest64 crosstest-lists
if BUILD_DOC
@DX_RULES@
......
......@@ -19,9 +19,10 @@ MoltenVK as the Vulkan driver. The llvmpipe and macOS jobs are
currently allowed to fail.
Additionally, MinGW is used to build PE binaries for both vkd3d and
its crosstests, for both 32 and 64 bit. These builds are not currently
tested (but the pipeline still fails if the compilation is not
successful).
its crosstests, for both 32 and 64 bit. The PE crosstests are executed
on Windows 10 to check that the behavior required by the tests matches
Microsoft's D3D12 implementation. The rendering backend is currently
Windows' WARP software implementation.
The testing logs are available as CI artifacts, as well as the PE
modules built by the crosstest and MinGW jobs.
......@@ -58,3 +59,6 @@ environment for running the tests. All the software required to
compile and run the tests will therefore have to be installed directly
on the host system. Complete instructions to set up the macOS
environment are currently not available.
Finally, a runner tagged with `win10-21h2' must be available; it
submits jobs to a Windows 10 virtual machine.
......@@ -19,6 +19,9 @@ cd build
touch ../pipeline_failed
mkdir -p ../artifacts/$COMMIT
rsync -Rr config.log tests/*.exe ../artifacts/$COMMIT
rsync -Rr config.log tests/*.txt tests/*.exe ../artifacts/$COMMIT
# Make the driver easily available to the Windows CI job
cp tests/driver.cross64.exe ../artifacts
git reset --hard
test-win-64:
stage: test
rules:
- if: $CI_PIPELINE_SOURCE == 'merge_request_event'
interruptible: true
needs:
- job: build-crosstest
tags:
- win10-21h2
script:
- ./artifacts/driver.cross64.exe
variables:
TEST_ARCH: "64"
artifacts:
when: always
paths:
- artifacts
test-win-32:
stage: test
rules:
- if: $CI_PIPELINE_SOURCE == 'merge_request_event'
interruptible: true
needs:
- job: build-crosstest
tags:
- win10-21h2
script:
- ./artifacts/driver.cross64.exe
variables:
TEST_ARCH: "32"
artifacts:
when: always
paths:
- artifacts
......@@ -555,9 +555,9 @@ static bool add_signature_element(struct vkd3d_shader_sm1_parser *sm1, bool outp
return false;
element = &signature->elements[signature->element_count++];
memset(element, 0, sizeof(*element));
element->semantic_name = name;
element->semantic_index = index;
element->stream_index = 0;
element->sysval_semantic = sysval;
element->component_type = VKD3D_SHADER_COMPONENT_FLOAT;
element->register_index = register_index;
......@@ -565,7 +565,8 @@ static bool add_signature_element(struct vkd3d_shader_sm1_parser *sm1, bool outp
element->register_count = 1;
element->mask = mask;
element->used_mask = is_dcl ? 0 : mask;
element->min_precision = VKD3D_SHADER_MINIMUM_PRECISION_NONE;
if (sm1->p.shader_version.type == VKD3D_SHADER_TYPE_PIXEL && !output)
element->interpolation_mode = VKD3DSIM_LINEAR;
return true;
}
......
......@@ -246,6 +246,8 @@ enum dx_intrinsic_opcode
{
DX_LOAD_INPUT = 4,
DX_STORE_OUTPUT = 5,
DX_CREATE_HANDLE = 57,
DX_CBUFFER_LOAD_LEGACY = 59,
};
struct sm6_pointer_info
......@@ -305,6 +307,7 @@ enum sm6_value_type
{
VALUE_TYPE_FUNCTION,
VALUE_TYPE_REG,
VALUE_TYPE_HANDLE,
};
struct sm6_function_data
......@@ -314,6 +317,12 @@ struct sm6_function_data
unsigned int attribs_id;
};
struct sm6_handle_data
{
const struct sm6_descriptor_info *d;
struct vkd3d_shader_register reg;
};
struct sm6_value
{
const struct sm6_type *type;
......@@ -323,6 +332,7 @@ struct sm6_value
{
struct sm6_function_data function;
struct vkd3d_shader_register reg;
struct sm6_handle_data handle;
} u;
};
......@@ -427,6 +437,13 @@ struct sm6_named_metadata
struct sm6_metadata_value value;
};
struct sm6_descriptor_info
{
enum vkd3d_shader_descriptor_type type;
unsigned int id;
struct vkd3d_shader_register_range range;
};
struct sm6_parser
{
const uint32_t *ptr, *start, *end;
......@@ -442,6 +459,7 @@ struct sm6_parser
struct sm6_type *types;
size_t type_count;
struct sm6_type *metadata_type;
struct sm6_type *handle_type;
struct sm6_symbol *global_symbols;
size_t global_symbol_count;
......@@ -458,6 +476,10 @@ struct sm6_parser
struct sm6_named_metadata *named_metadata;
unsigned int named_metadata_count;
struct sm6_descriptor_info *descriptors;
size_t descriptor_capacity;
size_t descriptor_count;
struct sm6_value *values;
size_t value_count;
size_t value_capacity;
......@@ -1391,6 +1413,9 @@ static enum vkd3d_result sm6_parser_type_table_init(struct sm6_parser *sm6)
break;
}
if (!ascii_strcasecmp(struct_name, "dx.types.Handle"))
sm6->handle_type = type;
type->u.struc->name = struct_name;
struct_name = NULL;
break;
......@@ -1438,6 +1463,11 @@ static inline bool sm6_type_is_integer(const struct sm6_type *type)
return type->class == TYPE_CLASS_INTEGER;
}
static bool sm6_type_is_bool(const struct sm6_type *type)
{
return type->class == TYPE_CLASS_INTEGER && type->u.width == 1;
}
static inline bool sm6_type_is_i8(const struct sm6_type *type)
{
return type->class == TYPE_CLASS_INTEGER && type->u.width == 8;
......@@ -1453,6 +1483,11 @@ static inline bool sm6_type_is_floating_point(const struct sm6_type *type)
return type->class == TYPE_CLASS_FLOAT;
}
static bool sm6_type_is_scalar(const struct sm6_type *type)
{
return type->class == TYPE_CLASS_INTEGER || type->class == TYPE_CLASS_FLOAT || type->class == TYPE_CLASS_POINTER;
}
static inline bool sm6_type_is_numeric(const struct sm6_type *type)
{
return type->class == TYPE_CLASS_INTEGER || type->class == TYPE_CLASS_FLOAT;
......@@ -1463,6 +1498,11 @@ static inline bool sm6_type_is_pointer(const struct sm6_type *type)
return type->class == TYPE_CLASS_POINTER;
}
static bool sm6_type_is_aggregate(const struct sm6_type *type)
{
return type->class == TYPE_CLASS_STRUCT || type->class == TYPE_CLASS_VECTOR || type->class == TYPE_CLASS_ARRAY;
}
static bool sm6_type_is_numeric_aggregate(const struct sm6_type *type)
{
unsigned int i;
......@@ -1533,6 +1573,27 @@ static const struct sm6_type *sm6_type_get_pointer_to_type(const struct sm6_type
return NULL;
}
/* Call for aggregate types only. */
static const struct sm6_type *sm6_type_get_element_type_at_index(const struct sm6_type *type, uint64_t elem_idx)
{
switch (type->class)
{
case TYPE_CLASS_ARRAY:
case TYPE_CLASS_VECTOR:
if (elem_idx >= type->u.array.count)
return NULL;
return type->u.array.elem_type;
case TYPE_CLASS_STRUCT:
if (elem_idx >= type->u.struc->elem_count)
return NULL;
return type->u.struc->elem_types[elem_idx];
default:
vkd3d_unreachable();
}
}
/* Never returns null for elem_idx 0. */
static const struct sm6_type *sm6_type_get_scalar_type(const struct sm6_type *type, unsigned int elem_idx)
{
......@@ -1557,6 +1618,11 @@ static const struct sm6_type *sm6_type_get_scalar_type(const struct sm6_type *ty
}
}
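/* Largest number of components of the given scalar type that fit in a 128-bit
 * register, capped at four components (e.g. four for 32-bit types, two for
 * 64-bit types). */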
static unsigned int sm6_type_max_vector_size(const struct sm6_type *type)
{
return min((VKD3D_VEC4_SIZE * sizeof(uint32_t) * CHAR_BIT) / type->u.width, VKD3D_VEC4_SIZE);
}
static const struct sm6_type *sm6_parser_get_type(struct sm6_parser *sm6, uint64_t type_id)
{
if (type_id >= sm6->type_count)
......@@ -1661,7 +1727,7 @@ static const char *sm6_parser_get_global_symbol_name(const struct sm6_parser *sm
static unsigned int register_get_uint_value(const struct vkd3d_shader_register *reg)
{
if (!register_is_constant(reg) || !data_type_is_integer(reg->data_type))
if (!register_is_constant(reg) || (!data_type_is_integer(reg->data_type) && !data_type_is_bool(reg->data_type)))
return UINT_MAX;
if (reg->dimension == VSIR_DIMENSION_VEC4)
......@@ -1710,6 +1776,11 @@ static inline bool sm6_value_is_register(const struct sm6_value *value)
return value->value_type == VALUE_TYPE_REG;
}
static bool sm6_value_is_handle(const struct sm6_value *value)
{
return value->value_type == VALUE_TYPE_HANDLE;
}
static inline bool sm6_value_is_constant(const struct sm6_value *value)
{
return sm6_value_is_register(value) && register_is_constant(&value->u.reg);
......@@ -1782,6 +1853,8 @@ static enum vkd3d_data_type vkd3d_data_type_from_sm6_type(const struct sm6_type
{
switch (type->u.width)
{
case 1:
return VKD3D_DATA_BOOL;
case 8:
return VKD3D_DATA_UINT8;
case 32:
......@@ -1811,8 +1884,8 @@ static enum vkd3d_data_type vkd3d_data_type_from_sm6_type(const struct sm6_type
return VKD3D_DATA_UINT;
}
static void register_init_ssa_scalar(struct vkd3d_shader_register *reg, const struct sm6_type *type,
struct sm6_parser *sm6)
static void register_init_ssa_vector(struct vkd3d_shader_register *reg, const struct sm6_type *type,
unsigned int component_count, struct sm6_parser *sm6)
{
enum vkd3d_data_type data_type;
unsigned int id;
......@@ -1820,6 +1893,13 @@ static void register_init_ssa_scalar(struct vkd3d_shader_register *reg, const st
id = sm6_parser_alloc_ssa_id(sm6);
data_type = vkd3d_data_type_from_sm6_type(sm6_type_get_scalar_type(type, 0));
register_init_with_id(reg, VKD3DSPR_SSA, data_type, id);
reg->dimension = component_count > 1 ? VSIR_DIMENSION_VEC4 : VSIR_DIMENSION_SCALAR;
}
static void register_init_ssa_scalar(struct vkd3d_shader_register *reg, const struct sm6_type *type,
struct sm6_parser *sm6)
{
register_init_ssa_vector(reg, type, 1, sm6);
}
static void dst_param_init(struct vkd3d_shader_dst_param *param)
......@@ -1836,6 +1916,13 @@ static inline void dst_param_init_scalar(struct vkd3d_shader_dst_param *param, u
param->shift = 0;
}
static void dst_param_init_vector(struct vkd3d_shader_dst_param *param, unsigned int component_count)
{
param->write_mask = (1u << component_count) - 1;
param->modifiers = 0;
param->shift = 0;
}
static void dst_param_init_ssa_scalar(struct vkd3d_shader_dst_param *param, const struct sm6_type *type,
struct sm6_parser *sm6)
{
......@@ -1861,25 +1948,32 @@ static void src_param_init_from_value(struct vkd3d_shader_src_param *param, cons
param->reg = src->u.reg;
}
static void register_address_init(struct vkd3d_shader_register *reg, const struct sm6_value *address,
unsigned int idx, struct sm6_parser *sm6)
static void src_param_init_vector_from_reg(struct vkd3d_shader_src_param *param,
const struct vkd3d_shader_register *reg)
{
param->swizzle = VKD3D_SHADER_NO_SWIZZLE;
param->modifiers = VKD3DSPSM_NONE;
param->reg = *reg;
}
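/* Set up a register index from a DXIL value: a constant value becomes an
 * immediate offset, an undef value becomes offset 0, and any other value is
 * emitted as a relative-address source parameter. */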
static void register_index_address_init(struct vkd3d_shader_register_index *idx, const struct sm6_value *address,
struct sm6_parser *sm6)
{
assert(idx < ARRAY_SIZE(reg->idx));
if (sm6_value_is_constant(address))
{
reg->idx[idx].offset = sm6_value_get_constant_uint(address);
idx->offset = sm6_value_get_constant_uint(address);
}
else if (sm6_value_is_undef(address))
{
reg->idx[idx].offset = 0;
idx->offset = 0;
}
else
{
struct vkd3d_shader_src_param *rel_addr = shader_parser_get_src_params(&sm6->p, 1);
if (rel_addr)
src_param_init_from_value(rel_addr, address);
reg->idx[idx].offset = 0;
reg->idx[idx].rel_addr = rel_addr;
idx->offset = 0;
idx->rel_addr = rel_addr;
}
}
......@@ -1893,6 +1987,17 @@ static void instruction_dst_param_init_ssa_scalar(struct vkd3d_shader_instructio
dst->u.reg = param->reg;
}
static void instruction_dst_param_init_ssa_vector(struct vkd3d_shader_instruction *ins,
unsigned int component_count, struct sm6_parser *sm6)
{
struct vkd3d_shader_dst_param *param = instruction_dst_params_alloc(ins, 1, sm6);
struct sm6_value *dst = sm6_parser_get_current_value(sm6);
dst_param_init_vector(param, component_count);
register_init_ssa_vector(&param->reg, sm6_type_get_scalar_type(dst->type, 0), component_count, sm6);
dst->u.reg = param->reg;
}
/* Recurse through the block tree while maintaining a current value count. The current
* count is the sum of the global count plus all declarations within the current function.
* Store into value_capacity the highest count seen. */
......@@ -1957,6 +2062,18 @@ static size_t sm6_parser_get_value_index(struct sm6_parser *sm6, uint64_t idx)
return i;
}
static bool sm6_value_validate_is_handle(const struct sm6_value *value, struct sm6_parser *sm6)
{
if (!sm6_value_is_handle(value))
{
WARN("Handle parameter of type %u is not a handle.\n", value->value_type);
vkd3d_shader_parser_error(&sm6->p, VKD3D_SHADER_ERROR_DXIL_INVALID_RESOURCE_HANDLE,
"A handle parameter passed to a DX intrinsic function is not a handle.");
return false;
}
return true;
}
static const struct sm6_value *sm6_parser_get_value_safe(struct sm6_parser *sm6, unsigned int idx)
{
if (idx < sm6->value_count)
......@@ -2519,6 +2636,92 @@ static struct sm6_block *sm6_block_create()
return block;
}
static void sm6_parser_emit_dx_cbuffer_load(struct sm6_parser *sm6, struct sm6_block *code_block,
enum dx_intrinsic_opcode op, const struct sm6_value **operands, struct vkd3d_shader_instruction *ins)
{
struct sm6_value *dst = sm6_parser_get_current_value(sm6);
struct vkd3d_shader_src_param *src_param;
const struct sm6_value *buffer;
const struct sm6_type *type;
buffer = operands[0];
if (!sm6_value_validate_is_handle(buffer, sm6))
return;
vsir_instruction_init(ins, &sm6->p.location, VKD3DSIH_MOV);
src_param = instruction_src_params_alloc(ins, 1, sm6);
src_param_init_vector_from_reg(src_param, &buffer->u.handle.reg);
register_index_address_init(&src_param->reg.idx[2], operands[1], sm6);
assert(src_param->reg.idx_count == 3);
type = sm6_type_get_scalar_type(dst->type, 0);
assert(type);
src_param->reg.data_type = vkd3d_data_type_from_sm6_type(type);
instruction_dst_param_init_ssa_vector(ins, sm6_type_max_vector_size(type), sm6);
}
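/* Find a descriptor with the given type and id. If the register index is a
 * constant it must also lie within the descriptor's register range; otherwise
 * the first descriptor with a matching type and id is returned. */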
static const struct sm6_descriptor_info *sm6_parser_get_descriptor(struct sm6_parser *sm6,
enum vkd3d_shader_descriptor_type type, unsigned int id, const struct sm6_value *address)
{
const struct sm6_descriptor_info *d;
unsigned int register_index;
size_t i;
for (i = 0; i < sm6->descriptor_count; ++i)
{
d = &sm6->descriptors[i];
if (d->type != type || d->id != id)
continue;
if (!sm6_value_is_constant(address))
return d;
register_index = sm6_value_get_constant_uint(address);
if (register_index >= d->range.first && register_index <= d->range.last)
return d;
}
return NULL;
}
static void sm6_parser_emit_dx_create_handle(struct sm6_parser *sm6, struct sm6_block *code_block,
enum dx_intrinsic_opcode op, const struct sm6_value **operands, struct vkd3d_shader_instruction *ins)
{
enum vkd3d_shader_descriptor_type type;
const struct sm6_descriptor_info *d;
struct vkd3d_shader_register *reg;
struct sm6_value *dst;
unsigned int id;
type = sm6_value_get_constant_uint(operands[0]);
id = sm6_value_get_constant_uint(operands[1]);
if (!(d = sm6_parser_get_descriptor(sm6, type, id, operands[2])))
{
WARN("Failed to find resource type %#x, id %#x.\n", type, id);
vkd3d_shader_parser_error(&sm6->p, VKD3D_SHADER_ERROR_DXIL_INVALID_OPERAND,
"Descriptor for resource type %#x, id %#x was not found.", type, id);
return;
}
dst = sm6_parser_get_current_value(sm6);
dst->value_type = VALUE_TYPE_HANDLE;
dst->u.handle.d = d;
reg = &dst->u.handle.reg;
/* Set idx_count to 3 for use with load instructions.
* TODO: set register type from resource type when other types are supported. */
vsir_register_init(reg, VKD3DSPR_CONSTBUFFER, VKD3D_DATA_FLOAT, 3);
reg->idx[0].offset = id;
register_index_address_init(&reg->idx[1], operands[2], sm6);
reg->non_uniform = !!sm6_value_get_constant_uint(operands[3]);
/* NOP is used to flag no instruction emitted. */
ins->handler_idx = VKD3DSIH_NOP;
}
static void sm6_parser_emit_dx_load_input(struct sm6_parser *sm6, struct sm6_block *code_block,
enum dx_intrinsic_opcode op, const struct sm6_value **operands, struct vkd3d_shader_instruction *ins)
{
......@@ -2546,7 +2749,7 @@ static void sm6_parser_emit_dx_load_input(struct sm6_parser *sm6, struct sm6_blo
src_param->reg = sm6->input_params[row_index].reg;
src_param_init_scalar(src_param, column_index);
if (e->register_count > 1)
register_address_init(&src_param->reg, operands[1], 0, sm6);
register_index_address_init(&src_param->reg.idx[0], operands[1], sm6);
instruction_dst_param_init_ssa_scalar(ins, sm6);
}
......@@ -2598,7 +2801,7 @@ static void sm6_parser_emit_dx_store_output(struct sm6_parser *sm6, struct sm6_b
dst_param_init_scalar(dst_param, column_index);
dst_param->reg = sm6->output_params[row_index].reg;
if (e->register_count > 1)
register_address_init(&dst_param->reg, operands[1], 0, sm6);
register_index_address_init(&dst_param->reg.idx[0], operands[1], sm6);
if ((src_param = instruction_src_params_alloc(ins, 1, sm6)))
src_param_init_from_value(src_param, value);
......@@ -2614,18 +2817,29 @@ struct sm6_dx_opcode_info
/*
8 -> int8
b -> constant int1
c -> constant int8/16/32
i -> int32
H -> handle
v -> void
o -> overloaded
*/
static const struct sm6_dx_opcode_info sm6_dx_op_table[] =
{
[DX_CBUFFER_LOAD_LEGACY ] = {'o', "Hi", sm6_parser_emit_dx_cbuffer_load},
[DX_CREATE_HANDLE ] = {'H', "ccib", sm6_parser_emit_dx_create_handle},
[DX_LOAD_INPUT ] = {'o', "ii8i", sm6_parser_emit_dx_load_input},
[DX_STORE_OUTPUT ] = {'v', "ii8o", sm6_parser_emit_dx_store_output},
};
static bool sm6_parser_validate_operand_type(struct sm6_parser *sm6, const struct sm6_type *type, char info_type)
static bool sm6_parser_validate_operand_type(struct sm6_parser *sm6, const struct sm6_value *value, char info_type,
bool is_return)
{
const struct sm6_type *type = value->type;
if (info_type != 'H' && !sm6_value_is_register(value))
return false;
switch (info_type)
{
case 0:
......@@ -2633,8 +2847,15 @@ static bool sm6_parser_validate_operand_type(struct sm6_parser *sm6, const struc
return false;
case '8':
return sm6_type_is_i8(type);
case 'b':
return sm6_value_is_constant(value) && sm6_type_is_bool(type);
case 'c':
return sm6_value_is_constant(value) && sm6_type_is_integer(type) && type->u.width >= 8
&& type->u.width <= 32;
case 'i':
return sm6_type_is_i32(type);
case 'H':
return (is_return || sm6_value_is_handle(value)) && type == sm6->handle_type;
case 'v':
return !type;
case 'o':
......@@ -2654,7 +2875,7 @@ static bool sm6_parser_validate_dx_op(struct sm6_parser *sm6, enum dx_intrinsic_
info = &sm6_dx_op_table[op];
if (!sm6_parser_validate_operand_type(sm6, dst->type, info->ret_type))
if (!sm6_parser_validate_operand_type(sm6, dst, info->ret_type, true))
{
WARN("Failed to validate return type for dx intrinsic id %u, '%s'.\n", op, name);
/* Return type validation failure is not so critical. We only need to set
......@@ -2664,7 +2885,7 @@ static bool sm6_parser_validate_dx_op(struct sm6_parser *sm6, enum dx_intrinsic_
for (i = 0; i < operand_count; ++i)
{
const struct sm6_value *value = operands[i];
if (!sm6_value_is_register(value) || !sm6_parser_validate_operand_type(sm6, value->type, info->operand_info[i]))
if (!sm6_parser_validate_operand_type(sm6, value, info->operand_info[i], false))
{
WARN("Failed to validate operand %u for dx intrinsic id %u, '%s'.\n", i + 1, op, name);
vkd3d_shader_parser_error(&sm6->p, VKD3D_SHADER_ERROR_DXIL_INVALID_OPERAND,
......@@ -2804,6 +3025,64 @@ static void sm6_parser_emit_call(struct sm6_parser *sm6, const struct dxil_recor
fn_value->u.function.name, &operands[1], operand_count - 1, ins, dst);
}
static void sm6_parser_emit_extractval(struct sm6_parser *sm6, const struct dxil_record *record,
struct vkd3d_shader_instruction *ins, struct sm6_value *dst)
{
struct vkd3d_shader_src_param *src_param;
const struct sm6_type *type;
const struct sm6_value *src;
unsigned int i = 0;
uint64_t elem_idx;
if (!(src = sm6_parser_get_value_by_ref(sm6, record, NULL, &i)))
return;
if (!dxil_record_validate_operand_min_count(record, i + 1, sm6))
return;
if (record->operand_count > i + 1)
{
FIXME("Unhandled multiple indices.\n");
vkd3d_shader_parser_error(&sm6->p, VKD3D_SHADER_ERROR_DXIL_INVALID_OPERAND,
"Multiple extractval indices are not supported.");
return;
}
type = src->type;
if (!sm6_type_is_aggregate(type))
{
WARN("Invalid extraction from non-aggregate.\n");
vkd3d_shader_parser_error(&sm6->p, VKD3D_SHADER_ERROR_DXIL_INVALID_OPERAND,
"Source type of an extractval instruction is not an aggregate.");
return;
}
elem_idx = record->operands[i];
if (!(type = sm6_type_get_element_type_at_index(type, elem_idx)))
{
WARN("Invalid element index %"PRIu64".\n", elem_idx);
vkd3d_shader_parser_error(&sm6->p, VKD3D_SHADER_ERROR_DXIL_INVALID_OPERAND,
"Element index %"PRIu64" for an extractval instruction is out of bounds.", elem_idx);
return;
}
if (!sm6_type_is_scalar(type))
{
FIXME("Nested extraction is not supported.\n");
vkd3d_shader_parser_error(&sm6->p, VKD3D_SHADER_ERROR_DXIL_INVALID_OPERAND,
"Extraction from nested aggregates is not supported.");
return;
}
dst->type = type;
ins->handler_idx = VKD3DSIH_MOV;
src_param = instruction_src_params_alloc(ins, 1, sm6);
src_param_init_from_value(src_param, src);
src_param->swizzle = vkd3d_shader_create_swizzle(elem_idx, elem_idx, elem_idx, elem_idx);
instruction_dst_param_init_ssa_scalar(ins, sm6);
}
static void sm6_parser_emit_ret(struct sm6_parser *sm6, const struct dxil_record *record,
struct sm6_block *code_block, struct vkd3d_shader_instruction *ins)
{
......@@ -2959,6 +3238,9 @@ static enum vkd3d_result sm6_parser_function_init(struct sm6_parser *sm6, const
case FUNC_CODE_INST_CALL:
sm6_parser_emit_call(sm6, record, code_block, ins, dst);
break;
case FUNC_CODE_INST_EXTRACTVAL:
sm6_parser_emit_extractval(sm6, record, ins, dst);
break;
case FUNC_CODE_INST_RET:
sm6_parser_emit_ret(sm6, record, code_block, ins);
is_terminator = true;
......@@ -3423,8 +3705,7 @@ static bool sm6_parser_resources_load_register_range(struct sm6_parser *sm6,
}
static enum vkd3d_result sm6_parser_resources_load_cbv(struct sm6_parser *sm6,
const struct sm6_metadata_node *node, const struct vkd3d_shader_register_range *range,
unsigned int register_id, struct vkd3d_shader_instruction *ins)
const struct sm6_metadata_node *node, struct sm6_descriptor_info *d, struct vkd3d_shader_instruction *ins)
{
struct vkd3d_shader_register *reg;
unsigned int buffer_size;
......@@ -3459,11 +3740,11 @@ static enum vkd3d_result sm6_parser_resources_load_cbv(struct sm6_parser *sm6,
reg = &ins->declaration.cb.src.reg;
vsir_register_init(reg, VKD3DSPR_CONSTBUFFER, VKD3D_DATA_FLOAT, 3);
reg->idx[0].offset = register_id;
reg->idx[1].offset = range->first;
reg->idx[2].offset = range->last;
reg->idx[0].offset = d->id;
reg->idx[1].offset = d->range.first;
reg->idx[2].offset = d->range.last;
ins->declaration.cb.range = *range;
ins->declaration.cb.range = d->range;
return VKD3D_OK;
}
......@@ -3471,12 +3752,12 @@ static enum vkd3d_result sm6_parser_resources_load_cbv(struct sm6_parser *sm6,
static enum vkd3d_result sm6_parser_descriptor_type_init(struct sm6_parser *sm6,
enum vkd3d_shader_descriptor_type type, const struct sm6_metadata_node *descriptor_node)
{
struct vkd3d_shader_register_range range;
struct vkd3d_shader_instruction *ins;
const struct sm6_metadata_node *node;
const struct sm6_metadata_value *m;
unsigned int i, register_id;
struct sm6_descriptor_info *d;
enum vkd3d_result ret;
unsigned int i;
for (i = 0; i < descriptor_node->operand_count; ++i)
{
......@@ -3498,7 +3779,18 @@ static enum vkd3d_result sm6_parser_descriptor_type_init(struct sm6_parser *sm6,
return VKD3D_ERROR_INVALID_SHADER;
}
if (!sm6_metadata_get_uint_value(sm6, node->operands[0], &register_id))
if (!vkd3d_array_reserve((void **)&sm6->descriptors, &sm6->descriptor_capacity,
sm6->descriptor_count + 1, sizeof(*sm6->descriptors)))
{
ERR("Failed to allocate descriptor array.\n");
vkd3d_shader_parser_error(&sm6->p, VKD3D_SHADER_ERROR_DXIL_OUT_OF_MEMORY,
"Out of memory allocating the descriptor array.");
return VKD3D_ERROR_OUT_OF_MEMORY;
}
d = &sm6->descriptors[sm6->descriptor_count];
d->type = type;
if (!sm6_metadata_get_uint_value(sm6, node->operands[0], &d->id))
{
WARN("Failed to load resource id.\n");
vkd3d_shader_parser_error(&sm6->p, VKD3D_SHADER_ERROR_DXIL_INVALID_RESOURCES,
......@@ -3506,7 +3798,7 @@ static enum vkd3d_result sm6_parser_descriptor_type_init(struct sm6_parser *sm6,
return VKD3D_ERROR_INVALID_SHADER;
}
if (!sm6_parser_resources_load_register_range(sm6, node, &range))
if (!sm6_parser_resources_load_register_range(sm6, node, &d->range))
{
vkd3d_shader_parser_error(&sm6->p, VKD3D_SHADER_ERROR_DXIL_INVALID_RESOURCES,
"Resource register range is invalid.");
......@@ -3522,7 +3814,7 @@ static enum vkd3d_result sm6_parser_descriptor_type_init(struct sm6_parser *sm6,
switch (type)
{
case VKD3D_SHADER_DESCRIPTOR_TYPE_CBV:
if ((ret = sm6_parser_resources_load_cbv(sm6, node, &range, register_id, ins)) < 0)
if ((ret = sm6_parser_resources_load_cbv(sm6, node, d, ins)) < 0)
return ret;
break;
default:
......@@ -3532,6 +3824,7 @@ static enum vkd3d_result sm6_parser_descriptor_type_init(struct sm6_parser *sm6,
return VKD3D_ERROR_INVALID_SHADER;
}
++sm6->descriptor_count;
++sm6->p.instructions.count;
}
......@@ -4132,6 +4425,7 @@ static void sm6_parser_destroy(struct vkd3d_shader_parser *parser)
sm6_symtab_cleanup(sm6->global_symbols, sm6->global_symbol_count);
sm6_functions_cleanup(sm6->functions, sm6->function_count);
sm6_parser_metadata_cleanup(sm6);
vkd3d_free(sm6->descriptors);
vkd3d_free(sm6->values);
free_shader_desc(&parser->shader_desc);
vkd3d_free(sm6);
......
......@@ -249,14 +249,7 @@ static enum hlsl_regset type_get_regset(const struct hlsl_type *type)
enum hlsl_regset hlsl_deref_get_regset(struct hlsl_ctx *ctx, const struct hlsl_deref *deref)
{
struct hlsl_type *type;
if (deref->data_type)
type = deref->data_type;
else
type = hlsl_deref_get_type(ctx, deref);
return type_get_regset(type);
return type_get_regset(hlsl_deref_get_type(ctx, deref));
}
unsigned int hlsl_type_get_sm4_offset(const struct hlsl_type *type, unsigned int offset)
......@@ -519,7 +512,9 @@ static bool init_deref(struct hlsl_ctx *ctx, struct hlsl_deref *deref, struct hl
{
deref->var = var;
deref->path_len = path_len;
deref->offset.node = NULL;
deref->rel_offset.node = NULL;
deref->const_offset = 0;
deref->data_type = NULL;
if (path_len == 0)
{
......@@ -546,7 +541,8 @@ bool hlsl_init_deref_from_index_chain(struct hlsl_ctx *ctx, struct hlsl_deref *d
deref->path = NULL;
deref->path_len = 0;
deref->offset.node = NULL;
deref->rel_offset.node = NULL;
deref->const_offset = 0;
assert(chain);
if (chain->type == HLSL_IR_INDEX)
......@@ -609,7 +605,7 @@ struct hlsl_type *hlsl_deref_get_type(struct hlsl_ctx *ctx, const struct hlsl_de
assert(deref);
if (deref->offset.node)
if (hlsl_deref_is_lowered(deref))
return deref->data_type;
type = deref->var->data_type;
......@@ -1120,7 +1116,7 @@ bool hlsl_copy_deref(struct hlsl_ctx *ctx, struct hlsl_deref *deref, const struc
if (!other)
return true;
assert(!other->offset.node);
assert(!hlsl_deref_is_lowered(other));
if (!init_deref(ctx, deref, other->var, other->path_len))
return false;
......@@ -1142,7 +1138,8 @@ void hlsl_cleanup_deref(struct hlsl_deref *deref)
deref->path = NULL;
deref->path_len = 0;
hlsl_src_remove(&deref->offset);
hlsl_src_remove(&deref->rel_offset);
deref->const_offset = 0;
}
/* Initializes a simple variable dereference, so that it can be passed to load/store functions. */
......@@ -1177,7 +1174,7 @@ struct hlsl_ir_node *hlsl_new_store_index(struct hlsl_ctx *ctx, const struct hls
unsigned int i;
assert(lhs);
assert(!lhs->offset.node);
assert(!hlsl_deref_is_lowered(lhs));
if (!(store = hlsl_alloc(ctx, sizeof(*store))))
return NULL;
......@@ -1343,6 +1340,40 @@ struct hlsl_ir_node *hlsl_new_if(struct hlsl_ctx *ctx, struct hlsl_ir_node *cond
return &iff->node;
}
struct hlsl_ir_switch_case *hlsl_new_switch_case(struct hlsl_ctx *ctx, unsigned int value,
bool is_default, struct hlsl_block *body, const struct vkd3d_shader_location *loc)
{
struct hlsl_ir_switch_case *c;
if (!(c = hlsl_alloc(ctx, sizeof(*c))))
return NULL;
c->value = value;
c->is_default = is_default;
hlsl_block_init(&c->body);
if (body)
hlsl_block_add_block(&c->body, body);
c->loc = *loc;
return c;
}
struct hlsl_ir_node *hlsl_new_switch(struct hlsl_ctx *ctx, struct hlsl_ir_node *selector,
struct list *cases, const struct vkd3d_shader_location *loc)
{
struct hlsl_ir_switch *s;
if (!(s = hlsl_alloc(ctx, sizeof(*s))))
return NULL;
init_node(&s->node, HLSL_IR_SWITCH, NULL, loc);
hlsl_src_from_node(&s->selector, selector);
list_init(&s->cases);
if (cases)
list_move_head(&s->cases, cases);
return &s->node;
}
struct hlsl_ir_load *hlsl_new_load_index(struct hlsl_ctx *ctx, const struct hlsl_deref *deref,
struct hlsl_ir_node *idx, const struct vkd3d_shader_location *loc)
{
......@@ -1350,7 +1381,7 @@ struct hlsl_ir_load *hlsl_new_load_index(struct hlsl_ctx *ctx, const struct hlsl
struct hlsl_type *type;
unsigned int i;
assert(!deref->offset.node);
assert(!hlsl_deref_is_lowered(deref));
type = hlsl_deref_get_type(ctx, deref);
if (idx)
......@@ -1623,7 +1654,7 @@ static bool clone_deref(struct hlsl_ctx *ctx, struct clone_instr_map *map,
{
unsigned int i;
assert(!src->offset.node);
assert(!hlsl_deref_is_lowered(src));
if (!init_deref(ctx, dst, src->var, src->path_len))
return false;
......@@ -1805,6 +1836,58 @@ static struct hlsl_ir_node *clone_index(struct hlsl_ctx *ctx, struct clone_instr
return dst;
}
void hlsl_free_ir_switch_case(struct hlsl_ir_switch_case *c)
{
hlsl_block_cleanup(&c->body);
list_remove(&c->entry);
vkd3d_free(c);
}
void hlsl_cleanup_ir_switch_cases(struct list *cases)
{
struct hlsl_ir_switch_case *c, *next;
LIST_FOR_EACH_ENTRY_SAFE(c, next, cases, struct hlsl_ir_switch_case, entry)
{
hlsl_free_ir_switch_case(c);
}
}
static struct hlsl_ir_node *clone_switch(struct hlsl_ctx *ctx,
struct clone_instr_map *map, struct hlsl_ir_switch *s)
{
struct hlsl_ir_switch_case *c, *d;
struct hlsl_ir_node *ret;
struct hlsl_block body;
struct list cases;
list_init(&cases);
LIST_FOR_EACH_ENTRY(c, &s->cases, struct hlsl_ir_switch_case, entry)
{
if (!(clone_block(ctx, &body, &c->body, map)))
{
hlsl_cleanup_ir_switch_cases(&cases);
return NULL;
}
d = hlsl_new_switch_case(ctx, c->value, c->is_default, &body, &c->loc);
hlsl_block_cleanup(&body);
if (!d)
{
hlsl_cleanup_ir_switch_cases(&cases);
return NULL;
}
list_add_tail(&cases, &d->entry);
}
ret = hlsl_new_switch(ctx, map_instr(map, s->selector.node), &cases, &s->node.loc);
hlsl_cleanup_ir_switch_cases(&cases);
return ret;
}
static struct hlsl_ir_node *clone_instr(struct hlsl_ctx *ctx,
struct clone_instr_map *map, const struct hlsl_ir_node *instr)
{
......@@ -1843,6 +1926,9 @@ static struct hlsl_ir_node *clone_instr(struct hlsl_ctx *ctx,
case HLSL_IR_STORE:
return clone_store(ctx, map, hlsl_ir_store(instr));
case HLSL_IR_SWITCH:
return clone_switch(ctx, map, hlsl_ir_switch(instr));
case HLSL_IR_SWIZZLE:
return clone_swizzle(ctx, map, hlsl_ir_swizzle(instr));
}
......@@ -2261,6 +2347,7 @@ const char *hlsl_node_type_to_string(enum hlsl_ir_node_type type)
[HLSL_IR_RESOURCE_LOAD ] = "HLSL_IR_RESOURCE_LOAD",
[HLSL_IR_RESOURCE_STORE] = "HLSL_IR_RESOURCE_STORE",
[HLSL_IR_STORE ] = "HLSL_IR_STORE",
[HLSL_IR_SWITCH ] = "HLSL_IR_SWITCH",
[HLSL_IR_SWIZZLE ] = "HLSL_IR_SWIZZLE",
};
......@@ -2327,21 +2414,34 @@ static void dump_deref(struct vkd3d_string_buffer *buffer, const struct hlsl_der
if (deref->var)
{
vkd3d_string_buffer_printf(buffer, "%s", deref->var->name);
if (deref->path_len)
if (!hlsl_deref_is_lowered(deref))
{
vkd3d_string_buffer_printf(buffer, "[");
for (i = 0; i < deref->path_len; ++i)
if (deref->path_len)
{
vkd3d_string_buffer_printf(buffer, "[");
dump_src(buffer, &deref->path[i]);
for (i = 0; i < deref->path_len; ++i)
{
vkd3d_string_buffer_printf(buffer, "[");
dump_src(buffer, &deref->path[i]);
vkd3d_string_buffer_printf(buffer, "]");
}
vkd3d_string_buffer_printf(buffer, "]");
}
vkd3d_string_buffer_printf(buffer, "]");
}
else if (deref->offset.node)
else
{
bool show_rel, show_const;
show_rel = deref->rel_offset.node;
show_const = deref->const_offset != 0 || !show_rel;
vkd3d_string_buffer_printf(buffer, "[");
dump_src(buffer, &deref->offset);
if (show_rel)
dump_src(buffer, &deref->rel_offset);
if (show_rel && show_const)
vkd3d_string_buffer_printf(buffer, " + ");
if (show_const)
vkd3d_string_buffer_printf(buffer, "%uc", deref->const_offset);
vkd3d_string_buffer_printf(buffer, "]");
}
}
......@@ -2685,6 +2785,32 @@ static void dump_ir_index(struct vkd3d_string_buffer *buffer, const struct hlsl_
vkd3d_string_buffer_printf(buffer, "]");
}
static void dump_ir_switch(struct hlsl_ctx *ctx, struct vkd3d_string_buffer *buffer, const struct hlsl_ir_switch *s)
{
struct hlsl_ir_switch_case *c;
vkd3d_string_buffer_printf(buffer, "switch (");
dump_src(buffer, &s->selector);
vkd3d_string_buffer_printf(buffer, ") {\n");
LIST_FOR_EACH_ENTRY(c, &s->cases, struct hlsl_ir_switch_case, entry)
{
if (c->is_default)
{
vkd3d_string_buffer_printf(buffer, " %10s default: {\n", "");
}
else
{
vkd3d_string_buffer_printf(buffer, " %10s case %u : {\n", "", c->value);
}
dump_block(ctx, buffer, &c->body);
vkd3d_string_buffer_printf(buffer, " %10s }\n", "");
}
vkd3d_string_buffer_printf(buffer, " %10s }", "");
}
static void dump_instr(struct hlsl_ctx *ctx, struct vkd3d_string_buffer *buffer, const struct hlsl_ir_node *instr)
{
if (instr->index)
......@@ -2740,6 +2866,10 @@ static void dump_instr(struct hlsl_ctx *ctx, struct vkd3d_string_buffer *buffer,
dump_ir_store(buffer, hlsl_ir_store(instr));
break;
case HLSL_IR_SWITCH:
dump_ir_switch(ctx, buffer, hlsl_ir_switch(instr));
break;
case HLSL_IR_SWIZZLE:
dump_ir_swizzle(buffer, hlsl_ir_swizzle(instr));
break;
......@@ -2881,7 +3011,7 @@ static void free_ir_resource_load(struct hlsl_ir_resource_load *load)
static void free_ir_resource_store(struct hlsl_ir_resource_store *store)
{
hlsl_src_remove(&store->resource.offset);
hlsl_src_remove(&store->resource.rel_offset);
hlsl_src_remove(&store->coords);
hlsl_src_remove(&store->value);
vkd3d_free(store);
......@@ -2900,6 +3030,14 @@ static void free_ir_swizzle(struct hlsl_ir_swizzle *swizzle)
vkd3d_free(swizzle);
}
static void free_ir_switch(struct hlsl_ir_switch *s)
{
hlsl_src_remove(&s->selector);
hlsl_cleanup_ir_switch_cases(&s->cases);
vkd3d_free(s);
}
static void free_ir_index(struct hlsl_ir_index *index)
{
hlsl_src_remove(&index->val);
......@@ -2960,6 +3098,10 @@ void hlsl_free_instr(struct hlsl_ir_node *node)
case HLSL_IR_SWIZZLE:
free_ir_swizzle(hlsl_ir_swizzle(node));
break;
case HLSL_IR_SWITCH:
free_ir_switch(hlsl_ir_switch(node));
break;
}
}
......
......@@ -281,6 +281,7 @@ enum hlsl_ir_node_type
HLSL_IR_RESOURCE_STORE,
HLSL_IR_STORE,
HLSL_IR_SWIZZLE,
HLSL_IR_SWITCH,
};
/* Common data for every type of IR instruction node. */
......@@ -424,6 +425,9 @@ struct hlsl_ir_var
* It may be less than the allocation size, e.g. for texture arrays. */
unsigned int bind_count[HLSL_REGSET_LAST_OBJECT + 1];
/* Whether the shader performs dereferences with non-constant offsets in the variable. */
bool indexable;
uint32_t is_input_semantic : 1;
uint32_t is_output_semantic : 1;
uint32_t is_uniform : 1;
......@@ -499,6 +503,22 @@ struct hlsl_ir_loop
unsigned int next_index; /* liveness index of the end of the loop */
};
struct hlsl_ir_switch_case
{
unsigned int value;
bool is_default;
struct hlsl_block body;
struct list entry;
struct vkd3d_shader_location loc;
};
struct hlsl_ir_switch
{
struct hlsl_ir_node node;
struct hlsl_src selector;
struct list cases;
};
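A minimal usage sketch (editor's illustration, not part of this change set): it builds a switch with a single non-default case using the constructors declared further below in this header. The helper name is hypothetical, and "ctx", "selector", "body" and "loc" are assumed to come from the caller.
static struct hlsl_ir_node *new_single_case_switch(struct hlsl_ctx *ctx, struct hlsl_ir_node *selector,
        struct hlsl_block *body, const struct vkd3d_shader_location *loc)
{
    struct hlsl_ir_switch_case *c;
    struct hlsl_ir_node *s;
    struct list cases;
    list_init(&cases);
    /* A single "case 0:" whose body is moved out of "body". */
    if (!(c = hlsl_new_switch_case(ctx, 0, false, body, loc)))
        return NULL;
    list_add_tail(&cases, &c->entry);
    /* hlsl_new_switch() moves the cases into the new node, so cleaning up the
     * local list is a no-op on success and frees the case on failure. */
    s = hlsl_new_switch(ctx, selector, &cases, loc);
    hlsl_cleanup_ir_switch_cases(&cases);
    return s;
}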
enum hlsl_ir_expr_op
{
HLSL_OP0_VOID,
......@@ -621,17 +641,25 @@ struct hlsl_deref
unsigned int path_len;
struct hlsl_src *path;
/* Single instruction node of data type uint used to represent the register offset (in register
* components, within the pertaining regset), from the start of the variable, of the part
* referenced.
* The path is lowered to this single offset -- whose value may vary between SM1 and SM4 --
* before writing the bytecode.
/* Before writing the bytecode, deref paths are lowered into an offset (within the pertaining
* regset) from the start of the variable, to the part of the variable that is referenced.
* This offset is stored using two fields, one for a variable part and another for a constant
* part, which are added together:
* - rel_offset: An offset given by an instruction node, in whole registers.
* - const_offset: A constant number of register components.
* Since the type information can no longer be retrieved from the offset alone, the type is
* stored in the data_type field. */
struct hlsl_src offset;
* stored in the data_type field, which remains NULL if the deref hasn't been lowered yet. */
struct hlsl_src rel_offset;
unsigned int const_offset;
struct hlsl_type *data_type;
};
/* Whether the path has been lowered to an offset or not. */
static inline bool hlsl_deref_is_lowered(const struct hlsl_deref *deref)
{
return !!deref->data_type;
}
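A small worked sketch (editor's illustration, not part of this change set) of how the two offset fields combine for the numeric regset once a deref has been lowered: rel_offset counts whole 4-component registers, while const_offset counts individual components. The helper name below is hypothetical.
static inline unsigned int hlsl_deref_total_component_offset(unsigned int rel_offset_value,
        unsigned int const_offset)
{
    /* A relative offset of N registers corresponds to 4 * N components. */
    return 4 * rel_offset_value + const_offset;
}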
struct hlsl_ir_load
{
struct hlsl_ir_node node;
......@@ -710,6 +738,8 @@ struct hlsl_scope
struct hlsl_scope *upper;
/* The scope was created for the loop statement. */
bool loop;
/* The scope was created for the switch statement. */
bool _switch;
};
struct hlsl_profile_info
......@@ -947,6 +977,12 @@ static inline struct hlsl_ir_index *hlsl_ir_index(const struct hlsl_ir_node *nod
return CONTAINING_RECORD(node, struct hlsl_ir_index, node);
}
static inline struct hlsl_ir_switch *hlsl_ir_switch(const struct hlsl_ir_node *node)
{
assert(node->type == HLSL_IR_SWITCH);
return CONTAINING_RECORD(node, struct hlsl_ir_switch, node);
}
static inline void hlsl_block_init(struct hlsl_block *block)
{
list_init(&block->instrs);
......@@ -1120,6 +1156,9 @@ bool hlsl_copy_deref(struct hlsl_ctx *ctx, struct hlsl_deref *deref, const struc
void hlsl_cleanup_deref(struct hlsl_deref *deref);
void hlsl_cleanup_semantic(struct hlsl_semantic *semantic);
void hlsl_cleanup_ir_switch_cases(struct list *cases);
void hlsl_free_ir_switch_case(struct hlsl_ir_switch_case *c);
void hlsl_replace_node(struct hlsl_ir_node *old, struct hlsl_ir_node *new);
void hlsl_free_attribute(struct hlsl_attribute *attr);
......@@ -1213,6 +1252,10 @@ struct hlsl_ir_node *hlsl_new_unary_expr(struct hlsl_ctx *ctx, enum hlsl_ir_expr
struct hlsl_ir_var *hlsl_new_var(struct hlsl_ctx *ctx, const char *name, struct hlsl_type *type,
const struct vkd3d_shader_location *loc, const struct hlsl_semantic *semantic, unsigned int modifiers,
const struct hlsl_reg_reservation *reg_reservation);
struct hlsl_ir_switch_case *hlsl_new_switch_case(struct hlsl_ctx *ctx, unsigned int value, bool is_default,
struct hlsl_block *body, const struct vkd3d_shader_location *loc);
struct hlsl_ir_node *hlsl_new_switch(struct hlsl_ctx *ctx, struct hlsl_ir_node *selector,
struct list *cases, const struct vkd3d_shader_location *loc);
void hlsl_error(struct hlsl_ctx *ctx, const struct vkd3d_shader_location *loc,
enum vkd3d_shader_error error, const char *fmt, ...) VKD3D_PRINTF_FUNC(4, 5);
......
......@@ -46,7 +46,7 @@ static void update_location(struct hlsl_ctx *ctx, YYLTYPE *loc);
%x pp pp_line pp_pragma pp_ignore
RESERVED1 auto|case|catch|char|class|const_cast|default|delete|dynamic_cast|enum
RESERVED1 auto|catch|char|class|const_cast|delete|dynamic_cast|enum
RESERVED2 explicit|friend|goto|long|mutable|new|operator|private|protected|public
RESERVED3 reinterpret_cast|short|signed|sizeof|static_cast|template|this|throw|try
RESERVED4 typename|union|unsigned|using|virtual
......@@ -73,6 +73,7 @@ ANY (.)
BlendState {return KW_BLENDSTATE; }
break {return KW_BREAK; }
Buffer {return KW_BUFFER; }
case {return KW_CASE; }
cbuffer {return KW_CBUFFER; }
centroid {return KW_CENTROID; }
compile {return KW_COMPILE; }
......@@ -80,6 +81,7 @@ const {return KW_CONST; }
continue {return KW_CONTINUE; }
DepthStencilState {return KW_DEPTHSTENCILSTATE; }
DepthStencilView {return KW_DEPTHSTENCILVIEW; }
default {return KW_DEFAULT; }
discard {return KW_DISCARD; }
do {return KW_DO; }
double {return KW_DOUBLE; }
......
......@@ -162,6 +162,12 @@ static void destroy_block(struct hlsl_block *block)
vkd3d_free(block);
}
static void destroy_switch_cases(struct list *cases)
{
hlsl_cleanup_ir_switch_cases(cases);
vkd3d_free(cases);
}
static bool hlsl_types_are_componentwise_compatible(struct hlsl_ctx *ctx, struct hlsl_type *src,
struct hlsl_type *dst)
{
......@@ -508,6 +514,28 @@ static void resolve_loop_continue(struct hlsl_ctx *ctx, struct hlsl_block *block
}
}
static void check_loop_attributes(struct hlsl_ctx *ctx, const struct parse_attribute_list *attributes,
const struct vkd3d_shader_location *loc)
{
bool has_unroll = false, has_loop = false, has_fastopt = false;
unsigned int i;
for (i = 0; i < attributes->count; ++i)
{
const char *name = attributes->attrs[i]->name;
has_loop |= !strcmp(name, "loop");
has_unroll |= !strcmp(name, "unroll");
has_fastopt |= !strcmp(name, "fastopt");
}
if (has_unroll && has_loop)
hlsl_error(ctx, loc, VKD3D_SHADER_ERROR_HLSL_INVALID_SYNTAX, "Unroll attribute can't be used with 'loop' attribute.");
if (has_unroll && has_fastopt)
hlsl_error(ctx, loc, VKD3D_SHADER_ERROR_HLSL_INVALID_SYNTAX, "Unroll attribute can't be used with 'fastopt' attribute.");
}
static struct hlsl_block *create_loop(struct hlsl_ctx *ctx, enum loop_type type,
const struct parse_attribute_list *attributes, struct hlsl_block *init, struct hlsl_block *cond,
struct hlsl_block *iter, struct hlsl_block *body, const struct vkd3d_shader_location *loc)
......@@ -518,6 +546,8 @@ static struct hlsl_block *create_loop(struct hlsl_ctx *ctx, enum loop_type type,
if (attribute_list_has_duplicates(attributes))
hlsl_error(ctx, loc, VKD3D_SHADER_ERROR_HLSL_INVALID_SYNTAX, "Found duplicate attribute.");
check_loop_attributes(ctx, attributes, loc);
/* Ignore unroll(0) attribute, and any invalid attribute. */
for (i = 0; i < attributes->count; ++i)
{
......@@ -533,8 +563,11 @@ static struct hlsl_block *create_loop(struct hlsl_ctx *ctx, enum loop_type type,
hlsl_warning(ctx, loc, VKD3D_SHADER_ERROR_HLSL_NOT_IMPLEMENTED, "Loop unrolling is not implemented.");
}
}
else if (!strcmp(attr->name, "loop")
|| !strcmp(attr->name, "fastopt")
else if (!strcmp(attr->name, "loop"))
{
/* TODO: this attribute will be used to disable unrolling, once it's implemented. */
}
else if (!strcmp(attr->name, "fastopt")
|| !strcmp(attr->name, "allow_uav_condition"))
{
hlsl_fixme(ctx, loc, "Unhandled attribute '%s'.", attr->name);
......@@ -1180,6 +1213,7 @@ static unsigned int evaluate_static_expression_as_uint(struct hlsl_ctx *ctx, str
case HLSL_IR_RESOURCE_LOAD:
case HLSL_IR_RESOURCE_STORE:
case HLSL_IR_STORE:
case HLSL_IR_SWITCH:
hlsl_error(ctx, &node->loc, VKD3D_SHADER_ERROR_HLSL_INVALID_SYNTAX,
"Expected literal expression.");
}
......@@ -4633,12 +4667,64 @@ static void validate_texture_format_type(struct hlsl_ctx *ctx, struct hlsl_type
}
}
static struct hlsl_scope *get_loop_scope(struct hlsl_scope *scope)
static bool check_continue(struct hlsl_ctx *ctx, const struct hlsl_scope *scope, const struct vkd3d_shader_location *loc)
{
if (scope->_switch)
{
hlsl_error(ctx, loc, VKD3D_SHADER_ERROR_HLSL_INVALID_SYNTAX,
"The 'continue' statement is not allowed in 'switch' statements.");
return false;
}
if (scope->loop)
return scope;
return true;
if (scope->upper)
return check_continue(ctx, scope->upper, loc);
hlsl_error(ctx, loc, VKD3D_SHADER_ERROR_HLSL_INVALID_SYNTAX, "The 'continue' statement is only allowed in loops.");
return false;
}
static bool is_break_allowed(const struct hlsl_scope *scope)
{
if (scope->loop || scope->_switch)
return true;
return scope->upper ? is_break_allowed(scope->upper) : false;
}
static void check_duplicated_switch_cases(struct hlsl_ctx *ctx, const struct hlsl_ir_switch_case *check, struct list *cases)
{
struct hlsl_ir_switch_case *c;
bool found_duplicate = false;
LIST_FOR_EACH_ENTRY(c, cases, struct hlsl_ir_switch_case, entry)
{
if (check->is_default)
{
if ((found_duplicate = c->is_default))
{
hlsl_error(ctx, &check->loc, VKD3D_SHADER_ERROR_HLSL_DUPLICATE_SWITCH_CASE,
"Found multiple 'default' statements.");
hlsl_note(ctx, &c->loc, VKD3D_SHADER_LOG_ERROR, "The 'default' statement was previously found here.");
}
}
else
{
if (c->is_default) continue;
if ((found_duplicate = (c->value == check->value)))
{
hlsl_error(ctx, &check->loc, VKD3D_SHADER_ERROR_HLSL_DUPLICATE_SWITCH_CASE,
"Found duplicate 'case' statement.");
hlsl_note(ctx, &c->loc, VKD3D_SHADER_LOG_ERROR, "The same 'case %u' statement was previously found here.",
c->value);
}
}
return scope->upper ? get_loop_scope(scope->upper) : NULL;
if (found_duplicate)
break;
}
}
}
......@@ -4679,17 +4765,20 @@ static struct hlsl_scope *get_loop_scope(struct hlsl_scope *scope)
enum hlsl_sampler_dim sampler_dim;
struct hlsl_attribute *attr;
struct parse_attribute_list attr_list;
struct hlsl_ir_switch_case *switch_case;
}
%token KW_BLENDSTATE
%token KW_BREAK
%token KW_BUFFER
%token KW_CASE
%token KW_CBUFFER
%token KW_CENTROID
%token KW_COLUMN_MAJOR
%token KW_COMPILE
%token KW_CONST
%token KW_CONTINUE
%token KW_DEFAULT
%token KW_DEPTHSTENCILSTATE
%token KW_DEPTHSTENCILVIEW
%token KW_DISCARD
......@@ -4796,6 +4885,7 @@ static struct hlsl_scope *get_loop_scope(struct hlsl_scope *scope)
%type <list> type_specs
%type <list> variables_def
%type <list> variables_def_typed
%type <list> switch_cases
%token <name> VAR_IDENTIFIER
%token <name> NEW_IDENTIFIER
......@@ -4838,6 +4928,7 @@ static struct hlsl_scope *get_loop_scope(struct hlsl_scope *scope)
%type <block> statement
%type <block> statement_list
%type <block> struct_declaration_without_vars
%type <block> switch_statement
%type <block> unary_expr
%type <boolval> boolean
......@@ -4876,6 +4967,8 @@ static struct hlsl_scope *get_loop_scope(struct hlsl_scope *scope)
%type <semantic> semantic
%type <switch_case> switch_case
%type <type> field_type
%type <type> named_struct_spec
%type <type> unnamed_struct_spec
......@@ -5357,6 +5450,13 @@ loop_scope_start:
ctx->cur_scope->loop = true;
}
switch_scope_start:
%empty
{
hlsl_push_scope(ctx);
ctx->cur_scope->_switch = true;
}
var_identifier:
VAR_IDENTIFIER
| NEW_IDENTIFIER
......@@ -6185,18 +6285,17 @@ statement:
| jump_statement
| selection_statement
| loop_statement
| switch_statement
jump_statement:
KW_BREAK ';'
{
struct hlsl_ir_node *jump;
/* TODO: allow 'break' in the 'switch' statements. */
if (!get_loop_scope(ctx->cur_scope))
if (!is_break_allowed(ctx->cur_scope))
{
hlsl_error(ctx, &@1, VKD3D_SHADER_ERROR_HLSL_INVALID_SYNTAX,
"The 'break' statement must be used inside of a loop.");
"The 'break' statement must be used inside of a loop or a switch.");
}
if (!($$ = make_empty_block(ctx)))
......@@ -6208,13 +6307,8 @@ jump_statement:
| KW_CONTINUE ';'
{
struct hlsl_ir_node *jump;
struct hlsl_scope *scope;
if (!(scope = get_loop_scope(ctx->cur_scope)))
{
hlsl_error(ctx, &@1, VKD3D_SHADER_ERROR_HLSL_INVALID_SYNTAX,
"The 'continue' statement must be used inside of a loop.");
}
check_continue(ctx, ctx->cur_scope, &@1);
if (!($$ = make_empty_block(ctx)))
YYABORT;
......@@ -6333,6 +6427,106 @@ loop_statement:
hlsl_pop_scope(ctx);
}
switch_statement:
attribute_list_optional switch_scope_start KW_SWITCH '(' expr ')' '{' switch_cases '}'
{
struct hlsl_ir_node *selector = node_from_block($5);
struct hlsl_ir_node *s;
if (!(selector = add_implicit_conversion(ctx, $5, selector, hlsl_get_scalar_type(ctx, HLSL_TYPE_UINT), &@5)))
{
destroy_switch_cases($8);
destroy_block($5);
YYABORT;
}
s = hlsl_new_switch(ctx, selector, $8, &@3);
destroy_switch_cases($8);
if (!s)
{
destroy_block($5);
YYABORT;
}
$$ = $5;
hlsl_block_add_instr($$, s);
hlsl_pop_scope(ctx);
}
switch_case:
KW_CASE expr ':' statement_list
{
struct hlsl_ir_switch_case *c;
unsigned int value;
value = evaluate_static_expression_as_uint(ctx, $2, &@2);
c = hlsl_new_switch_case(ctx, value, false, $4, &@2);
destroy_block($2);
destroy_block($4);
if (!c)
YYABORT;
$$ = c;
}
| KW_CASE expr ':'
{
struct hlsl_ir_switch_case *c;
unsigned int value;
value = evaluate_static_expression_as_uint(ctx, $2, &@2);
c = hlsl_new_switch_case(ctx, value, false, NULL, &@2);
destroy_block($2);
if (!c)
YYABORT;
$$ = c;
}
| KW_DEFAULT ':' statement_list
{
struct hlsl_ir_switch_case *c;
c = hlsl_new_switch_case(ctx, 0, true, $3, &@1);
destroy_block($3);
if (!c)
YYABORT;
$$ = c;
}
| KW_DEFAULT ':'
{
struct hlsl_ir_switch_case *c;
if (!(c = hlsl_new_switch_case(ctx, 0, true, NULL, &@1)))
YYABORT;
$$ = c;
}
switch_cases:
switch_case
{
struct hlsl_ir_switch_case *c = LIST_ENTRY($1, struct hlsl_ir_switch_case, entry);
if (!($$ = make_empty_list(ctx)))
{
hlsl_free_ir_switch_case(c);
YYABORT;
}
list_add_head($$, &$1->entry);
}
| switch_cases switch_case
{
$$ = $1;
check_duplicated_switch_cases(ctx, $2, $$);
list_add_tail($$, &$2->entry);
}
expr_optional:
%empty
{
......
......@@ -23,30 +23,21 @@
/* TODO: remove when no longer needed, only used for new_offset_instr_from_deref() */
static struct hlsl_ir_node *new_offset_from_path_index(struct hlsl_ctx *ctx, struct hlsl_block *block,
struct hlsl_type *type, struct hlsl_ir_node *offset, struct hlsl_ir_node *idx,
enum hlsl_regset regset, const struct vkd3d_shader_location *loc)
struct hlsl_type *type, struct hlsl_ir_node *base_offset, struct hlsl_ir_node *idx,
enum hlsl_regset regset, unsigned int *offset_component, const struct vkd3d_shader_location *loc)
{
struct hlsl_ir_node *idx_offset = NULL;
struct hlsl_ir_node *c;
hlsl_block_init(block);
switch (type->class)
{
case HLSL_CLASS_VECTOR:
idx_offset = idx;
*offset_component += hlsl_ir_constant(idx)->value.u[0].u;
break;
case HLSL_CLASS_MATRIX:
{
if (!(c = hlsl_new_uint_constant(ctx, 4, loc)))
return NULL;
hlsl_block_add_instr(block, c);
if (!(idx_offset = hlsl_new_binary_expr(ctx, HLSL_OP2_MUL, c, idx)))
return NULL;
hlsl_block_add_instr(block, idx_offset);
idx_offset = idx;
break;
}
......@@ -54,6 +45,12 @@ static struct hlsl_ir_node *new_offset_from_path_index(struct hlsl_ctx *ctx, str
{
unsigned int size = hlsl_type_get_array_element_reg_size(type->e.array.type, regset);
if (regset == HLSL_REGSET_NUMERIC)
{
assert(size % 4 == 0);
size /= 4;
}
if (!(c = hlsl_new_uint_constant(ctx, size, loc)))
return NULL;
hlsl_block_add_instr(block, c);
......@@ -69,8 +66,16 @@ static struct hlsl_ir_node *new_offset_from_path_index(struct hlsl_ctx *ctx, str
{
unsigned int field_idx = hlsl_ir_constant(idx)->value.u[0].u;
struct hlsl_struct_field *field = &type->e.record.fields[field_idx];
unsigned int field_offset = field->reg_offset[regset];
if (regset == HLSL_REGSET_NUMERIC)
{
assert(*offset_component == 0);
*offset_component = field_offset % 4;
field_offset /= 4;
}
if (!(c = hlsl_new_uint_constant(ctx, field->reg_offset[regset], loc)))
if (!(c = hlsl_new_uint_constant(ctx, field_offset, loc)))
return NULL;
hlsl_block_add_instr(block, c);
......@@ -83,27 +88,33 @@ static struct hlsl_ir_node *new_offset_from_path_index(struct hlsl_ctx *ctx, str
vkd3d_unreachable();
}
if (offset)
if (idx_offset)
{
if (!(idx_offset = hlsl_new_binary_expr(ctx, HLSL_OP2_ADD, offset, idx_offset)))
if (!(base_offset = hlsl_new_binary_expr(ctx, HLSL_OP2_ADD, base_offset, idx_offset)))
return NULL;
hlsl_block_add_instr(block, idx_offset);
hlsl_block_add_instr(block, base_offset);
}
return idx_offset;
return base_offset;
}
/* TODO: remove when no longer needed, only used for replace_deref_path_with_offset() */
static struct hlsl_ir_node *new_offset_instr_from_deref(struct hlsl_ctx *ctx, struct hlsl_block *block,
const struct hlsl_deref *deref, const struct vkd3d_shader_location *loc)
const struct hlsl_deref *deref, unsigned int *offset_component, const struct vkd3d_shader_location *loc)
{
enum hlsl_regset regset = hlsl_deref_get_regset(ctx, deref);
struct hlsl_ir_node *offset = NULL;
struct hlsl_ir_node *offset;
struct hlsl_type *type;
unsigned int i;
*offset_component = 0;
hlsl_block_init(block);
if (!(offset = hlsl_new_uint_constant(ctx, 0, loc)))
return NULL;
hlsl_block_add_instr(block, offset);
assert(deref->var);
type = deref->var->data_type;
......@@ -111,9 +122,14 @@ static struct hlsl_ir_node *new_offset_instr_from_deref(struct hlsl_ctx *ctx, st
{
struct hlsl_block idx_block;
hlsl_block_init(&idx_block);
if (!(offset = new_offset_from_path_index(ctx, &idx_block, type, offset, deref->path[i].node,
regset, loc)))
regset, offset_component, loc)))
{
hlsl_block_cleanup(&idx_block);
return NULL;
}
hlsl_block_add_block(block, &idx_block);
......@@ -127,14 +143,13 @@ static struct hlsl_ir_node *new_offset_instr_from_deref(struct hlsl_ctx *ctx, st
static bool replace_deref_path_with_offset(struct hlsl_ctx *ctx, struct hlsl_deref *deref,
struct hlsl_ir_node *instr)
{
struct hlsl_type *type;
unsigned int offset_component;
struct hlsl_ir_node *offset;
struct hlsl_block block;
struct hlsl_type *type;
assert(deref->var);
/* register offsets shouldn't be used before this point is reached. */
assert(!deref->offset.node);
assert(!hlsl_deref_is_lowered(deref));
type = hlsl_deref_get_type(ctx, deref);
......@@ -148,16 +163,35 @@ static bool replace_deref_path_with_offset(struct hlsl_ctx *ctx, struct hlsl_der
deref->data_type = type;
if (!(offset = new_offset_instr_from_deref(ctx, &block, deref, &instr->loc)))
if (!(offset = new_offset_instr_from_deref(ctx, &block, deref, &offset_component, &instr->loc)))
return false;
list_move_before(&instr->entry, &block.instrs);
hlsl_cleanup_deref(deref);
hlsl_src_from_node(&deref->offset, offset);
hlsl_src_from_node(&deref->rel_offset, offset);
deref->const_offset = offset_component;
return true;
}
static bool clean_constant_deref_offset_srcs(struct hlsl_ctx *ctx, struct hlsl_deref *deref,
struct hlsl_ir_node *instr)
{
if (deref->rel_offset.node && deref->rel_offset.node->type == HLSL_IR_CONSTANT)
{
enum hlsl_regset regset = hlsl_deref_get_regset(ctx, deref);
if (regset == HLSL_REGSET_NUMERIC)
deref->const_offset += 4 * hlsl_ir_constant(deref->rel_offset.node)->value.u[0].u;
else
deref->const_offset += hlsl_ir_constant(deref->rel_offset.node)->value.u[0].u;
hlsl_src_remove(&deref->rel_offset);
return true;
}
return false;
}
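
After this pass a numeric deref is addressed as rel_offset whole vec4 registers plus const_offset scalar components, which is why the constant folding above multiplies by 4. A minimal standalone sketch of that arithmetic (the names are illustrative, not vkd3d API):

#include <assert.h>
#include <stdio.h>

/* For HLSL_REGSET_NUMERIC, folding a constant relative offset into the
 * constant part multiplies it by 4, because relative offsets count vec4
 * registers while const_offset counts scalar components. */
static unsigned int fold_numeric_offset(unsigned int rel_offset_regs, unsigned int const_offset_comps)
{
    return 4 * rel_offset_regs + const_offset_comps;
}

int main(void)
{
    unsigned int flat = fold_numeric_offset(3, 1); /* register 3, component 1, i.e. r3.y */

    assert(flat == 13 && flat / 4 == 3 && flat % 4 == 1);
    printf("flat component offset %u -> register %u, component %u\n", flat, flat / 4, flat % 4);
    return 0;
}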
/* Split uniforms into two variables representing the constant and temp
* registers, and copy the former to the latter, so that writes to uniforms
* work. */
......@@ -575,7 +609,19 @@ bool hlsl_transform_ir(struct hlsl_ctx *ctx, bool (*func)(struct hlsl_ctx *ctx,
progress |= hlsl_transform_ir(ctx, func, &iff->else_block, context);
}
else if (instr->type == HLSL_IR_LOOP)
{
progress |= hlsl_transform_ir(ctx, func, &hlsl_ir_loop(instr)->body, context);
}
else if (instr->type == HLSL_IR_SWITCH)
{
struct hlsl_ir_switch *s = hlsl_ir_switch(instr);
struct hlsl_ir_switch_case *c;
LIST_FOR_EACH_ENTRY(c, &s->cases, struct hlsl_ir_switch_case, entry)
{
progress |= hlsl_transform_ir(ctx, func, &c->body, context);
}
}
progress |= func(ctx, instr, context);
}
......@@ -835,6 +881,30 @@ static bool lower_return(struct hlsl_ctx *ctx, struct hlsl_ir_function_decl *fun
}
}
}
else if (instr->type == HLSL_IR_SWITCH)
{
struct hlsl_ir_switch *s = hlsl_ir_switch(instr);
struct hlsl_ir_switch_case *c;
LIST_FOR_EACH_ENTRY(c, &s->cases, struct hlsl_ir_switch_case, entry)
{
has_early_return |= lower_return(ctx, func, &c->body, true);
}
if (has_early_return)
{
if (in_loop)
{
/* For a 'switch' nested in a loop append a break after the 'switch'. */
insert_early_return_break(ctx, func, instr);
}
else
{
cf_instr = instr;
break;
}
}
}
}
if (return_instr)
......@@ -1639,6 +1709,19 @@ static void copy_propagation_invalidate_from_block(struct hlsl_ctx *ctx, struct
break;
}
case HLSL_IR_SWITCH:
{
struct hlsl_ir_switch *s = hlsl_ir_switch(instr);
struct hlsl_ir_switch_case *c;
LIST_FOR_EACH_ENTRY(c, &s->cases, struct hlsl_ir_switch_case, entry)
{
copy_propagation_invalidate_from_block(ctx, state, &c->body);
}
break;
}
default:
break;
}
......@@ -1687,6 +1770,28 @@ static bool copy_propagation_process_loop(struct hlsl_ctx *ctx, struct hlsl_ir_l
return progress;
}
static bool copy_propagation_process_switch(struct hlsl_ctx *ctx, struct hlsl_ir_switch *s,
struct copy_propagation_state *state)
{
struct copy_propagation_state inner_state;
struct hlsl_ir_switch_case *c;
bool progress = false;
LIST_FOR_EACH_ENTRY(c, &s->cases, struct hlsl_ir_switch_case, entry)
{
copy_propagation_state_init(ctx, &inner_state, state);
progress |= copy_propagation_transform_block(ctx, &c->body, &inner_state);
copy_propagation_state_destroy(&inner_state);
}
LIST_FOR_EACH_ENTRY(c, &s->cases, struct hlsl_ir_switch_case, entry)
{
copy_propagation_invalidate_from_block(ctx, state, &c->body);
}
return progress;
}
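
Each case is transformed against its own copy of the state, and afterwards every case body invalidates the parent state, because any one of the bodies may have run. A small self-contained C illustration of why a store inside a case cannot be propagated past the switch:

#include <stdio.h>

/* "x = 1" only dominates code up to the switch; afterwards any case may have
 * overwritten x, so the recorded value has to be dropped from the parent
 * copy-propagation state. */
static int after_switch(int selector)
{
    int x = 1;

    switch (selector)
    {
        case 0:
            x = 2;
            break;
        default:
            break;
    }

    return x; /* 1 or 2 depending on selector; not a propagatable constant. */
}

int main(void)
{
    printf("%d %d\n", after_switch(0), after_switch(5));
    return 0;
}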
static bool copy_propagation_transform_block(struct hlsl_ctx *ctx, struct hlsl_block *block,
struct copy_propagation_state *state)
{
......@@ -1725,6 +1830,10 @@ static bool copy_propagation_transform_block(struct hlsl_ctx *ctx, struct hlsl_b
progress |= copy_propagation_process_loop(ctx, hlsl_ir_loop(instr), state);
break;
case HLSL_IR_SWITCH:
progress |= copy_propagation_process_switch(ctx, hlsl_ir_switch(instr), state);
break;
default:
break;
}
......@@ -2094,6 +2203,118 @@ static bool remove_trivial_conditional_branches(struct hlsl_ctx *ctx, struct hls
return true;
}
static bool normalize_switch_cases(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr, void *context)
{
struct hlsl_ir_switch_case *c, *def = NULL;
bool missing_terminal_break = false;
struct hlsl_ir_node *node;
struct hlsl_ir_jump *jump;
struct hlsl_ir_switch *s;
if (instr->type != HLSL_IR_SWITCH)
return false;
s = hlsl_ir_switch(instr);
LIST_FOR_EACH_ENTRY(c, &s->cases, struct hlsl_ir_switch_case, entry)
{
bool terminal_break = false;
if (list_empty(&c->body.instrs))
{
terminal_break = !!list_next(&s->cases, &c->entry);
}
else
{
node = LIST_ENTRY(list_tail(&c->body.instrs), struct hlsl_ir_node, entry);
if (node->type == HLSL_IR_JUMP)
{
jump = hlsl_ir_jump(node);
terminal_break = jump->type == HLSL_IR_JUMP_BREAK;
}
}
missing_terminal_break |= !terminal_break;
if (!terminal_break)
{
if (c->is_default)
{
hlsl_error(ctx, &c->loc, VKD3D_SHADER_ERROR_HLSL_INVALID_SYNTAX,
"The 'default' case block is not terminated with 'break' or 'return'.");
}
else
{
hlsl_error(ctx, &c->loc, VKD3D_SHADER_ERROR_HLSL_INVALID_SYNTAX,
"Switch case block '%u' is not terminated with 'break' or 'return'.", c->value);
}
}
}
if (missing_terminal_break)
return true;
LIST_FOR_EACH_ENTRY(c, &s->cases, struct hlsl_ir_switch_case, entry)
{
if (c->is_default)
{
def = c;
/* Remove preceding empty cases. */
while (list_prev(&s->cases, &def->entry))
{
c = LIST_ENTRY(list_prev(&s->cases, &def->entry), struct hlsl_ir_switch_case, entry);
if (!list_empty(&c->body.instrs))
break;
hlsl_free_ir_switch_case(c);
}
if (list_empty(&def->body.instrs))
{
/* Remove following empty cases. */
while (list_next(&s->cases, &def->entry))
{
c = LIST_ENTRY(list_next(&s->cases, &def->entry), struct hlsl_ir_switch_case, entry);
if (!list_empty(&c->body.instrs))
break;
hlsl_free_ir_switch_case(c);
}
/* Merge with the next case. */
if (list_next(&s->cases, &def->entry))
{
c = LIST_ENTRY(list_next(&s->cases, &def->entry), struct hlsl_ir_switch_case, entry);
c->is_default = true;
hlsl_free_ir_switch_case(def);
def = c;
}
}
break;
}
}
if (def)
{
list_remove(&def->entry);
}
else
{
struct hlsl_ir_node *jump;
if (!(def = hlsl_new_switch_case(ctx, 0, true, NULL, &s->node.loc)))
return true;
if (!(jump = hlsl_new_jump(ctx, HLSL_IR_JUMP_BREAK, NULL, &s->node.loc)))
{
hlsl_free_ir_switch_case(def);
return true;
}
hlsl_block_add_instr(&def->body, jump);
}
list_add_tail(&s->cases, &def->entry);
return true;
}
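
In other words, a case body counts as terminated only when its last instruction is an unconditional break; an empty body is tolerated as long as another case follows, since consecutive labels share the next body. A tiny standalone restatement of that rule (illustrative only):

#include <stdbool.h>
#include <stdio.h>

enum last_instr { LAST_NONE, LAST_BREAK, LAST_OTHER };

/* Mirrors the check above: an empty body is fine unless it is the last case,
 * and a non-empty body must end in an unconditional break. */
static bool case_is_terminated(enum last_instr last, bool has_following_case)
{
    if (last == LAST_NONE)
        return has_following_case;
    return last == LAST_BREAK;
}

int main(void)
{
    printf("%d %d %d\n",
            case_is_terminated(LAST_BREAK, false),  /* 1: "case 0: ...; break;" */
            case_is_terminated(LAST_NONE, true),    /* 1: "case 0: case 1: ..." */
            case_is_terminated(LAST_OTHER, true));  /* 0: missing break, reported as an error */
    return 0;
}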
static bool lower_nonconstant_vector_derefs(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr, struct hlsl_block *block)
{
struct hlsl_ir_node *idx;
......@@ -2929,6 +3150,7 @@ static bool dce(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr, void *context)
case HLSL_IR_JUMP:
case HLSL_IR_LOOP:
case HLSL_IR_RESOURCE_STORE:
case HLSL_IR_SWITCH:
break;
}
......@@ -2956,6 +3178,16 @@ static unsigned int index_instructions(struct hlsl_block *block, unsigned int in
index = index_instructions(&hlsl_ir_loop(instr)->body, index);
hlsl_ir_loop(instr)->next_index = index;
}
else if (instr->type == HLSL_IR_SWITCH)
{
struct hlsl_ir_switch *s = hlsl_ir_switch(instr);
struct hlsl_ir_switch_case *c;
LIST_FOR_EACH_ENTRY(c, &s->cases, struct hlsl_ir_switch_case, entry)
{
index = index_instructions(&c->body, index);
}
}
}
return index;
......@@ -2978,6 +3210,19 @@ static void dump_function(struct rb_entry *entry, void *context)
rb_for_each_entry(&func->overloads, dump_function_decl, ctx);
}
static bool mark_indexable_vars(struct hlsl_ctx *ctx, struct hlsl_deref *deref,
struct hlsl_ir_node *instr)
{
if (!deref->rel_offset.node)
return false;
assert(deref->var);
assert(deref->rel_offset.node->type != HLSL_IR_CONSTANT);
deref->var->indexable = true;
return true;
}
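
A variable is marked indexable when some dereference still carries a non-constant relative offset after constant folding, i.e. it is subscripted with a value only known at run time. Roughly the following distinction, shown on plain C arrays (illustrative, not the compiler's own code):

#include <stdio.h>

/* A constant subscript folds into the register allocation itself (plain r#
 * registers suffice); a runtime subscript needs addressable storage, which is
 * what marking the variable indexable (SM4 x# indexable temps) provides. */
static float constant_index(const float arr[4])
{
    return arr[2];
}

static float dynamic_index(const float arr[4], unsigned int i)
{
    return arr[i & 3u];
}

int main(void)
{
    float arr[4] = {1.0f, 2.0f, 3.0f, 4.0f};

    printf("%.1f %.1f\n", constant_index(arr), dynamic_index(arr, 1));
    return 0;
}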
static char get_regset_name(enum hlsl_regset regset)
{
switch (regset)
......@@ -3066,8 +3311,8 @@ static void compute_liveness_recurse(struct hlsl_block *block, unsigned int loop
if (!var->first_write)
var->first_write = loop_first ? min(instr->index, loop_first) : instr->index;
store->rhs.node->last_read = last_read;
if (store->lhs.offset.node)
store->lhs.offset.node->last_read = last_read;
if (store->lhs.rel_offset.node)
store->lhs.rel_offset.node->last_read = last_read;
break;
}
case HLSL_IR_EXPR:
......@@ -3094,8 +3339,8 @@ static void compute_liveness_recurse(struct hlsl_block *block, unsigned int loop
var = load->src.var;
var->last_read = max(var->last_read, last_read);
if (load->src.offset.node)
load->src.offset.node->last_read = last_read;
if (load->src.rel_offset.node)
load->src.rel_offset.node->last_read = last_read;
break;
}
case HLSL_IR_LOOP:
......@@ -3112,14 +3357,14 @@ static void compute_liveness_recurse(struct hlsl_block *block, unsigned int loop
var = load->resource.var;
var->last_read = max(var->last_read, last_read);
if (load->resource.offset.node)
load->resource.offset.node->last_read = last_read;
if (load->resource.rel_offset.node)
load->resource.rel_offset.node->last_read = last_read;
if ((var = load->sampler.var))
{
var->last_read = max(var->last_read, last_read);
if (load->sampler.offset.node)
load->sampler.offset.node->last_read = last_read;
if (load->sampler.rel_offset.node)
load->sampler.rel_offset.node->last_read = last_read;
}
if (load->coords.node)
......@@ -3144,8 +3389,8 @@ static void compute_liveness_recurse(struct hlsl_block *block, unsigned int loop
var = store->resource.var;
var->last_read = max(var->last_read, last_read);
if (store->resource.offset.node)
store->resource.offset.node->last_read = last_read;
if (store->resource.rel_offset.node)
store->resource.rel_offset.node->last_read = last_read;
store->coords.node->last_read = last_read;
store->value.node->last_read = last_read;
break;
......@@ -3173,6 +3418,16 @@ static void compute_liveness_recurse(struct hlsl_block *block, unsigned int loop
jump->condition.node->last_read = last_read;
break;
}
case HLSL_IR_SWITCH:
{
struct hlsl_ir_switch *s = hlsl_ir_switch(instr);
struct hlsl_ir_switch_case *c;
LIST_FOR_EACH_ENTRY(c, &s->cases, struct hlsl_ir_switch_case, entry)
compute_liveness_recurse(&c->body, loop_first, loop_last);
s->selector.node->last_read = last_read;
break;
}
case HLSL_IR_CONSTANT:
break;
}
......@@ -3218,6 +3473,10 @@ struct register_allocator
unsigned int writemask;
unsigned int first_write, last_read;
} *allocations;
/* Indexable temps are allocated separately and always keep their index regardless of their
* lifetime. */
size_t indexable_count;
};
static unsigned int get_available_writemask(const struct register_allocator *allocator,
......@@ -3464,11 +3723,23 @@ static void allocate_variable_temp_register(struct hlsl_ctx *ctx,
if (!var->regs[HLSL_REGSET_NUMERIC].allocated && var->last_read)
{
var->regs[HLSL_REGSET_NUMERIC] = allocate_numeric_registers_for_type(ctx, allocator,
var->first_write, var->last_read, var->data_type);
if (var->indexable)
{
var->regs[HLSL_REGSET_NUMERIC].id = allocator->indexable_count++;
var->regs[HLSL_REGSET_NUMERIC].allocation_size = 1;
var->regs[HLSL_REGSET_NUMERIC].writemask = 0;
var->regs[HLSL_REGSET_NUMERIC].allocated = true;
TRACE("Allocated %s to %s (liveness %u-%u).\n", var->name, debug_register('r',
var->regs[HLSL_REGSET_NUMERIC], var->data_type), var->first_write, var->last_read);
TRACE("Allocated %s to x%u[].\n", var->name, var->regs[HLSL_REGSET_NUMERIC].id);
}
else
{
var->regs[HLSL_REGSET_NUMERIC] = allocate_numeric_registers_for_type(ctx, allocator,
var->first_write, var->last_read, var->data_type);
TRACE("Allocated %s to %s (liveness %u-%u).\n", var->name, debug_register('r',
var->regs[HLSL_REGSET_NUMERIC], var->data_type), var->first_write, var->last_read);
}
}
}
......@@ -3524,6 +3795,18 @@ static void allocate_temp_registers_recurse(struct hlsl_ctx *ctx,
break;
}
case HLSL_IR_SWITCH:
{
struct hlsl_ir_switch *s = hlsl_ir_switch(instr);
struct hlsl_ir_switch_case *c;
LIST_FOR_EACH_ENTRY(c, &s->cases, struct hlsl_ir_switch_case, entry)
{
allocate_temp_registers_recurse(ctx, &c->body, allocator);
}
break;
}
default:
break;
}
......@@ -3633,6 +3916,18 @@ static void allocate_const_registers_recurse(struct hlsl_ctx *ctx,
break;
}
case HLSL_IR_SWITCH:
{
struct hlsl_ir_switch *s = hlsl_ir_switch(instr);
struct hlsl_ir_switch_case *c;
LIST_FOR_EACH_ENTRY(c, &s->cases, struct hlsl_ir_switch_case, entry)
{
allocate_const_registers_recurse(ctx, &c->body, allocator);
}
break;
}
default:
break;
}
......@@ -4206,30 +4501,25 @@ bool hlsl_regset_index_from_deref(struct hlsl_ctx *ctx, const struct hlsl_deref
bool hlsl_offset_from_deref(struct hlsl_ctx *ctx, const struct hlsl_deref *deref, unsigned int *offset)
{
struct hlsl_ir_node *offset_node = deref->offset.node;
enum hlsl_regset regset;
enum hlsl_regset regset = hlsl_deref_get_regset(ctx, deref);
struct hlsl_ir_node *offset_node = deref->rel_offset.node;
unsigned int size;
if (!offset_node)
{
*offset = 0;
return true;
}
*offset = deref->const_offset;
/* We should always have generated a cast to UINT. */
assert(offset_node->data_type->class == HLSL_CLASS_SCALAR
&& offset_node->data_type->base_type == HLSL_TYPE_UINT);
if (offset_node->type != HLSL_IR_CONSTANT)
if (offset_node)
{
/* We should always have generated a cast to UINT. */
assert(offset_node->data_type->class == HLSL_CLASS_SCALAR
&& offset_node->data_type->base_type == HLSL_TYPE_UINT);
assert(offset_node->type != HLSL_IR_CONSTANT);
return false;
*offset = hlsl_ir_constant(offset_node)->value.u[0].u;
regset = hlsl_deref_get_regset(ctx, deref);
}
size = deref->var->data_type->reg_size[regset];
if (*offset >= size)
{
hlsl_error(ctx, &deref->offset.node->loc, VKD3D_SHADER_ERROR_HLSL_OFFSET_OUT_OF_BOUNDS,
hlsl_error(ctx, &deref->var->loc, VKD3D_SHADER_ERROR_HLSL_OFFSET_OUT_OF_BOUNDS,
"Dereference is out of bounds. %u/%u", *offset, size);
return false;
}
......@@ -4244,8 +4534,8 @@ unsigned int hlsl_offset_from_deref_safe(struct hlsl_ctx *ctx, const struct hlsl
if (hlsl_offset_from_deref(ctx, deref, &offset))
return offset;
hlsl_fixme(ctx, &deref->offset.node->loc, "Dereference with non-constant offset of type %s.",
hlsl_node_type_to_string(deref->offset.node->type));
hlsl_fixme(ctx, &deref->rel_offset.node->loc, "Dereference with non-constant offset of type %s.",
hlsl_node_type_to_string(deref->rel_offset.node->type));
return 0;
}
......@@ -4335,6 +4625,62 @@ static bool type_has_object_components(struct hlsl_type *type)
return false;
}
static void remove_unreachable_code(struct hlsl_ctx *ctx, struct hlsl_block *body)
{
struct hlsl_ir_node *instr, *next;
struct hlsl_block block;
struct list *start;
LIST_FOR_EACH_ENTRY_SAFE(instr, next, &body->instrs, struct hlsl_ir_node, entry)
{
if (instr->type == HLSL_IR_IF)
{
struct hlsl_ir_if *iff = hlsl_ir_if(instr);
remove_unreachable_code(ctx, &iff->then_block);
remove_unreachable_code(ctx, &iff->else_block);
}
else if (instr->type == HLSL_IR_LOOP)
{
struct hlsl_ir_loop *loop = hlsl_ir_loop(instr);
remove_unreachable_code(ctx, &loop->body);
}
else if (instr->type == HLSL_IR_SWITCH)
{
struct hlsl_ir_switch *s = hlsl_ir_switch(instr);
struct hlsl_ir_switch_case *c;
LIST_FOR_EACH_ENTRY(c, &s->cases, struct hlsl_ir_switch_case, entry)
{
remove_unreachable_code(ctx, &c->body);
}
}
}
/* Remove instructions past unconditional jumps. */
LIST_FOR_EACH_ENTRY(instr, &body->instrs, struct hlsl_ir_node, entry)
{
struct hlsl_ir_jump *jump;
if (instr->type != HLSL_IR_JUMP)
continue;
jump = hlsl_ir_jump(instr);
if (jump->type != HLSL_IR_JUMP_BREAK && jump->type != HLSL_IR_JUMP_CONTINUE)
continue;
if (!(start = list_next(&body->instrs, &instr->entry)))
break;
hlsl_block_init(&block);
list_move_slice_tail(&block.instrs, start, list_tail(&body->instrs));
hlsl_block_cleanup(&block);
break;
}
}
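
The second loop is plain dead-code elimination after an unconditional jump. The same idea over a flat array, independent of vkd3d's list helpers (names are illustrative):

#include <stdio.h>
#include <stddef.h>

enum instr_kind { INSTR_OTHER, INSTR_BREAK, INSTR_CONTINUE };

/* Everything up to and including the first unconditional break/continue is
 * reachable; the rest of the block can be discarded. */
static size_t reachable_count(const enum instr_kind *body, size_t count)
{
    size_t i;

    for (i = 0; i < count; ++i)
    {
        if (body[i] == INSTR_BREAK || body[i] == INSTR_CONTINUE)
            return i + 1;
    }
    return count;
}

int main(void)
{
    enum instr_kind body[] = {INSTR_OTHER, INSTR_BREAK, INSTR_OTHER, INSTR_OTHER};

    printf("%u of 4 instructions are reachable\n", (unsigned int)reachable_count(body, 4));
    return 0;
}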
int hlsl_emit_bytecode(struct hlsl_ctx *ctx, struct hlsl_ir_function_decl *entry_func,
enum vkd3d_shader_target_type target_type, struct vkd3d_shader_code *out)
{
......@@ -4452,6 +4798,8 @@ int hlsl_emit_bytecode(struct hlsl_ctx *ctx, struct hlsl_ir_function_decl *entry
progress |= hlsl_transform_ir(ctx, remove_trivial_conditional_branches, body, NULL);
}
while (progress);
remove_unreachable_code(ctx, body);
hlsl_transform_ir(ctx, normalize_switch_cases, body, NULL);
lower_ir(ctx, lower_nonconstant_vector_derefs, body);
lower_ir(ctx, lower_casts_to_bool, body);
......@@ -4482,6 +4830,7 @@ int hlsl_emit_bytecode(struct hlsl_ctx *ctx, struct hlsl_ir_function_decl *entry
/* TODO: move forward, remove when no longer needed */
transform_derefs(ctx, replace_deref_path_with_offset, body);
while (hlsl_transform_ir(ctx, hlsl_fold_constant_exprs, body, NULL));
transform_derefs(ctx, clean_constant_deref_offset_srcs, body);
do
compute_liveness(ctx, entry_func);
......@@ -4492,6 +4841,8 @@ int hlsl_emit_bytecode(struct hlsl_ctx *ctx, struct hlsl_ir_function_decl *entry
if (TRACE_ON())
rb_for_each_entry(&ctx->functions, dump_function, ctx);
transform_derefs(ctx, mark_indexable_vars, body);
calculate_resource_register_counts(ctx);
allocate_register_reservations(ctx);
......
......@@ -581,6 +581,12 @@ static unsigned int shader_signature_find_element_for_reg(const struct shader_si
vkd3d_unreachable();
}
struct signature_element *vsir_signature_find_element_for_reg(const struct shader_signature *signature,
unsigned int reg_idx, unsigned int write_mask)
{
return &signature->elements[shader_signature_find_element_for_reg(signature, reg_idx, write_mask)];
}
static unsigned int range_map_get_register_count(uint8_t range_map[][VKD3D_VEC4_SIZE],
unsigned int register_idx, unsigned int write_mask)
{
......
......@@ -173,7 +173,13 @@ enum vkd3d_shader_input_sysval_semantic vkd3d_siv_from_sysval_indexed(enum vkd3d
{
switch (sysval)
{
case VKD3D_SHADER_SV_COVERAGE:
case VKD3D_SHADER_SV_DEPTH:
case VKD3D_SHADER_SV_DEPTH_GREATER_EQUAL:
case VKD3D_SHADER_SV_DEPTH_LESS_EQUAL:
case VKD3D_SHADER_SV_NONE:
case VKD3D_SHADER_SV_STENCIL_REF:
case VKD3D_SHADER_SV_TARGET:
return VKD3D_SIV_NONE;
case VKD3D_SHADER_SV_POSITION:
return VKD3D_SIV_POSITION;
......@@ -181,6 +187,16 @@ enum vkd3d_shader_input_sysval_semantic vkd3d_siv_from_sysval_indexed(enum vkd3d
return VKD3D_SIV_CLIP_DISTANCE;
case VKD3D_SHADER_SV_CULL_DISTANCE:
return VKD3D_SIV_CULL_DISTANCE;
case VKD3D_SHADER_SV_INSTANCE_ID:
return VKD3D_SIV_INSTANCE_ID;
case VKD3D_SHADER_SV_IS_FRONT_FACE:
return VKD3D_SIV_IS_FRONT_FACE;
case VKD3D_SHADER_SV_PRIMITIVE_ID:
return VKD3D_SIV_PRIMITIVE_ID;
case VKD3D_SHADER_SV_RENDER_TARGET_ARRAY_INDEX:
return VKD3D_SIV_RENDER_TARGET_ARRAY_INDEX;
case VKD3D_SHADER_SV_SAMPLE_INDEX:
return VKD3D_SIV_SAMPLE_INDEX;
case VKD3D_SHADER_SV_TESS_FACTOR_QUADEDGE:
return VKD3D_SIV_QUAD_U0_TESS_FACTOR + index;
case VKD3D_SHADER_SV_TESS_FACTOR_QUADINT:
......@@ -193,6 +209,10 @@ enum vkd3d_shader_input_sysval_semantic vkd3d_siv_from_sysval_indexed(enum vkd3d
return VKD3D_SIV_LINE_DETAIL_TESS_FACTOR;
case VKD3D_SHADER_SV_TESS_FACTOR_LINEDEN:
return VKD3D_SIV_LINE_DENSITY_TESS_FACTOR;
case VKD3D_SHADER_SV_VERTEX_ID:
return VKD3D_SIV_VERTEX_ID;
case VKD3D_SHADER_SV_VIEWPORT_ARRAY_INDEX:
return VKD3D_SIV_VIEWPORT_ARRAY_INDEX;
default:
FIXME("Unhandled sysval %#x, index %u.\n", sysval, index);
return VKD3D_SIV_NONE;
......@@ -3523,6 +3543,14 @@ static bool vkd3d_swizzle_is_equal(unsigned int dst_write_mask,
return vkd3d_compact_swizzle(VKD3D_SHADER_NO_SWIZZLE, dst_write_mask) == vkd3d_compact_swizzle(swizzle, write_mask);
}
static bool vkd3d_swizzle_is_scalar(unsigned int swizzle)
{
unsigned int component_idx = vkd3d_swizzle_get_component(swizzle, 0);
return vkd3d_swizzle_get_component(swizzle, 1) == component_idx
&& vkd3d_swizzle_get_component(swizzle, 2) == component_idx
&& vkd3d_swizzle_get_component(swizzle, 3) == component_idx;
}
static uint32_t spirv_compiler_emit_swizzle(struct spirv_compiler *compiler,
uint32_t val_id, unsigned int val_write_mask, enum vkd3d_shader_component_type component_type,
unsigned int swizzle, unsigned int write_mask)
......@@ -3725,6 +3753,26 @@ static void spirv_compiler_set_ssa_register_id(const struct spirv_compiler *comp
compiler->ssa_register_ids[i] = val_id;
}
static uint32_t spirv_compiler_emit_load_ssa_reg(struct spirv_compiler *compiler,
const struct vkd3d_shader_register *reg, enum vkd3d_shader_component_type component_type,
unsigned int swizzle)
{
struct vkd3d_spirv_builder *builder = &compiler->spirv_builder;
unsigned int component_idx;
uint32_t type_id, val_id;
val_id = spirv_compiler_get_ssa_register_id(compiler, reg);
assert(val_id);
assert(vkd3d_swizzle_is_scalar(swizzle));
if (reg->dimension == VSIR_DIMENSION_SCALAR)
return val_id;
type_id = vkd3d_spirv_get_type_id(builder, component_type, 1);
component_idx = vkd3d_swizzle_get_component(swizzle, 0);
return vkd3d_spirv_build_op_composite_extract1(builder, type_id, val_id, component_idx);
}
static uint32_t spirv_compiler_emit_load_reg(struct spirv_compiler *compiler,
const struct vkd3d_shader_register *reg, DWORD swizzle, DWORD write_mask)
{
......@@ -3746,7 +3794,7 @@ static uint32_t spirv_compiler_emit_load_reg(struct spirv_compiler *compiler,
component_type = vkd3d_component_type_from_data_type(reg->data_type);
if (reg->type == VKD3DSPR_SSA)
return spirv_compiler_get_ssa_register_id(compiler, reg);
return spirv_compiler_emit_load_ssa_reg(compiler, reg, component_type, swizzle);
if (!spirv_compiler_get_register_info(compiler, reg, &reg_info))
{
......@@ -4592,8 +4640,7 @@ static unsigned int shader_register_get_io_indices(const struct vkd3d_shader_reg
}
static uint32_t spirv_compiler_emit_input(struct spirv_compiler *compiler,
const struct vkd3d_shader_dst_param *dst, enum vkd3d_shader_input_sysval_semantic sysval,
enum vkd3d_shader_interpolation_mode interpolation_mode)
const struct vkd3d_shader_dst_param *dst)
{
struct vkd3d_spirv_builder *builder = &compiler->spirv_builder;
const struct vkd3d_shader_register *reg = &dst->reg;
......@@ -4601,6 +4648,7 @@ static uint32_t spirv_compiler_emit_input(struct spirv_compiler *compiler,
const struct signature_element *signature_element;
const struct shader_signature *shader_signature;
enum vkd3d_shader_component_type component_type;
enum vkd3d_shader_input_sysval_semantic sysval;
uint32_t type_id, ptr_type_id, float_type_id;
const struct vkd3d_spirv_builtin *builtin;
unsigned int write_mask, reg_write_mask;
......@@ -4622,10 +4670,12 @@ static uint32_t spirv_compiler_emit_input(struct spirv_compiler *compiler,
element_idx = shader_register_get_io_indices(reg, array_sizes);
signature_element = &shader_signature->elements[element_idx];
if ((compiler->shader_type == VKD3D_SHADER_TYPE_HULL || compiler->shader_type == VKD3D_SHADER_TYPE_GEOMETRY)
&& !sysval && signature_element->sysval_semantic)
sysval = vkd3d_siv_from_sysval(signature_element->sysval_semantic);
sysval = vkd3d_siv_from_sysval(signature_element->sysval_semantic);
/* The Vulkan spec does not explicitly forbid passing varyings from the
* TCS to the TES via builtins. However, Mesa doesn't seem to handle it
* well, and we don't actually need them to be in builtins. */
if (compiler->shader_type == VKD3D_SHADER_TYPE_DOMAIN && reg->type != VKD3DSPR_PATCHCONST)
sysval = VKD3D_SIV_NONE;
builtin = get_spirv_builtin_for_sysval(compiler, sysval);
......@@ -4693,7 +4743,7 @@ static uint32_t spirv_compiler_emit_input(struct spirv_compiler *compiler,
if (component_idx)
vkd3d_spirv_build_op_decorate1(builder, input_id, SpvDecorationComponent, component_idx);
spirv_compiler_emit_interpolation_decorations(compiler, input_id, interpolation_mode);
spirv_compiler_emit_interpolation_decorations(compiler, input_id, signature_element->interpolation_mode);
}
var_id = input_id;
......@@ -4806,7 +4856,7 @@ static void spirv_compiler_emit_shader_phase_input(struct spirv_compiler *compil
case VKD3DSPR_INPUT:
case VKD3DSPR_INCONTROLPOINT:
case VKD3DSPR_PATCHCONST:
spirv_compiler_emit_input(compiler, dst, VKD3D_SIV_NONE, VKD3DSIM_NONE);
spirv_compiler_emit_input(compiler, dst);
return;
case VKD3DSPR_PRIMID:
spirv_compiler_emit_input_register(compiler, dst);
......@@ -4977,8 +5027,7 @@ static uint32_t spirv_compiler_emit_shader_phase_builtin_variable(struct spirv_c
return id;
}
static void spirv_compiler_emit_output(struct spirv_compiler *compiler,
const struct vkd3d_shader_dst_param *dst, enum vkd3d_shader_input_sysval_semantic sysval)
static void spirv_compiler_emit_output(struct spirv_compiler *compiler, const struct vkd3d_shader_dst_param *dst)
{
struct vkd3d_spirv_builder *builder = &compiler->spirv_builder;
const struct vkd3d_shader_register *reg = &dst->reg;
......@@ -4986,6 +5035,7 @@ static void spirv_compiler_emit_output(struct spirv_compiler *compiler,
const struct signature_element *signature_element;
enum vkd3d_shader_component_type component_type;
const struct shader_signature *shader_signature;
enum vkd3d_shader_input_sysval_semantic sysval;
const struct vkd3d_spirv_builtin *builtin;
unsigned int write_mask, reg_write_mask;
bool use_private_variable = false;
......@@ -5002,6 +5052,10 @@ static void spirv_compiler_emit_output(struct spirv_compiler *compiler,
element_idx = shader_register_get_io_indices(reg, array_sizes);
signature_element = &shader_signature->elements[element_idx];
sysval = vkd3d_siv_from_sysval(signature_element->sysval_semantic);
/* Don't use builtins for TCS -> TES varyings. See spirv_compiler_emit_input(). */
if (compiler->shader_type == VKD3D_SHADER_TYPE_HULL && !is_patch_constant)
sysval = VKD3D_SIV_NONE;
builtin = vkd3d_get_spirv_builtin(compiler, dst->reg.type, sysval);
......@@ -6089,7 +6143,7 @@ static void spirv_compiler_emit_dcl_input(struct spirv_compiler *compiler,
if (spirv_compiler_get_current_shader_phase(compiler))
spirv_compiler_emit_shader_phase_input(compiler, dst);
else if (vkd3d_shader_register_is_input(&dst->reg) || dst->reg.type == VKD3DSPR_PATCHCONST)
spirv_compiler_emit_input(compiler, dst, VKD3D_SIV_NONE, VKD3DSIM_NONE);
spirv_compiler_emit_input(compiler, dst);
else
spirv_compiler_emit_input_register(compiler, dst);
......@@ -6097,25 +6151,10 @@ static void spirv_compiler_emit_dcl_input(struct spirv_compiler *compiler,
compiler->use_vocp = true;
}
static void spirv_compiler_emit_dcl_input_ps(struct spirv_compiler *compiler,
const struct vkd3d_shader_instruction *instruction)
{
spirv_compiler_emit_input(compiler, &instruction->declaration.dst, VKD3D_SIV_NONE, instruction->flags);
}
static void spirv_compiler_emit_dcl_input_ps_sysval(struct spirv_compiler *compiler,
const struct vkd3d_shader_instruction *instruction)
{
const struct vkd3d_shader_register_semantic *semantic = &instruction->declaration.register_semantic;
spirv_compiler_emit_input(compiler, &semantic->reg, semantic->sysval_semantic, instruction->flags);
}
static void spirv_compiler_emit_dcl_input_sysval(struct spirv_compiler *compiler,
const struct vkd3d_shader_instruction *instruction)
{
spirv_compiler_emit_input(compiler, &instruction->declaration.register_semantic.reg,
instruction->declaration.register_semantic.sysval_semantic, VKD3DSIM_NONE);
spirv_compiler_emit_input(compiler, &instruction->declaration.register_semantic.reg);
}
static void spirv_compiler_emit_dcl_output(struct spirv_compiler *compiler,
......@@ -6125,7 +6164,7 @@ static void spirv_compiler_emit_dcl_output(struct spirv_compiler *compiler,
if (vkd3d_shader_register_is_output(&dst->reg)
|| (is_in_fork_or_join_phase(compiler) && vkd3d_shader_register_is_patch_constant(&dst->reg)))
spirv_compiler_emit_output(compiler, dst, VKD3D_SIV_NONE);
spirv_compiler_emit_output(compiler, dst);
else
spirv_compiler_emit_output_register(compiler, dst);
}
......@@ -6133,13 +6172,7 @@ static void spirv_compiler_emit_dcl_output(struct spirv_compiler *compiler,
static void spirv_compiler_emit_dcl_output_siv(struct spirv_compiler *compiler,
const struct vkd3d_shader_instruction *instruction)
{
enum vkd3d_shader_input_sysval_semantic sysval;
const struct vkd3d_shader_dst_param *dst;
dst = &instruction->declaration.register_semantic.reg;
sysval = instruction->declaration.register_semantic.sysval_semantic;
spirv_compiler_emit_output(compiler, dst, sysval);
spirv_compiler_emit_output(compiler, &instruction->declaration.register_semantic.reg);
}
static void spirv_compiler_emit_dcl_stream(struct spirv_compiler *compiler,
......@@ -9301,16 +9334,12 @@ static int spirv_compiler_handle_instruction(struct spirv_compiler *compiler,
case VKD3DSIH_DCL_TGSM_STRUCTURED:
spirv_compiler_emit_dcl_tgsm_structured(compiler, instruction);
break;
case VKD3DSIH_DCL_INPUT_PS:
case VKD3DSIH_DCL_INPUT:
spirv_compiler_emit_dcl_input(compiler, instruction);
break;
case VKD3DSIH_DCL_INPUT_PS:
spirv_compiler_emit_dcl_input_ps(compiler, instruction);
break;
case VKD3DSIH_DCL_INPUT_PS_SGV:
case VKD3DSIH_DCL_INPUT_PS_SIV:
spirv_compiler_emit_dcl_input_ps_sysval(compiler, instruction);
break;
case VKD3DSIH_DCL_INPUT_SGV:
case VKD3DSIH_DCL_INPUT_SIV:
spirv_compiler_emit_dcl_input_sysval(compiler, instruction);
......
......@@ -1070,16 +1070,31 @@ static void shader_sm4_read_declaration_register_semantic(struct vkd3d_shader_in
static void shader_sm4_read_dcl_input_ps(struct vkd3d_shader_instruction *ins, uint32_t opcode,
uint32_t opcode_token, const uint32_t *tokens, unsigned int token_count, struct vkd3d_shader_sm4_parser *priv)
{
struct vkd3d_shader_dst_param *dst = &ins->declaration.dst;
ins->flags = (opcode_token & VKD3D_SM4_INTERPOLATION_MODE_MASK) >> VKD3D_SM4_INTERPOLATION_MODE_SHIFT;
shader_sm4_read_dst_param(priv, &tokens, &tokens[token_count], VKD3D_DATA_FLOAT, &ins->declaration.dst);
if (shader_sm4_read_dst_param(priv, &tokens, &tokens[token_count], VKD3D_DATA_FLOAT, dst))
{
struct signature_element *e = vsir_signature_find_element_for_reg(
&priv->p.shader_desc.input_signature, dst->reg.idx[dst->reg.idx_count - 1].offset, dst->write_mask);
e->interpolation_mode = ins->flags;
}
}
static void shader_sm4_read_dcl_input_ps_siv(struct vkd3d_shader_instruction *ins, uint32_t opcode,
uint32_t opcode_token, const uint32_t *tokens, unsigned int token_count, struct vkd3d_shader_sm4_parser *priv)
{
struct vkd3d_shader_dst_param *dst = &ins->declaration.register_semantic.reg;
ins->flags = (opcode_token & VKD3D_SM4_INTERPOLATION_MODE_MASK) >> VKD3D_SM4_INTERPOLATION_MODE_SHIFT;
shader_sm4_read_dst_param(priv, &tokens, &tokens[token_count], VKD3D_DATA_FLOAT,
&ins->declaration.register_semantic.reg);
if (shader_sm4_read_dst_param(priv, &tokens, &tokens[token_count], VKD3D_DATA_FLOAT, dst))
{
struct signature_element *e = vsir_signature_find_element_for_reg(
&priv->p.shader_desc.input_signature, dst->reg.idx[dst->reg.idx_count - 1].offset, dst->write_mask);
e->interpolation_mode = ins->flags;
}
ins->declaration.register_semantic.sysval_semantic = *tokens;
}
......@@ -3732,7 +3747,7 @@ static void sm4_register_from_deref(struct hlsl_ctx *ctx, struct vkd3d_shader_re
struct hlsl_reg hlsl_reg = hlsl_reg_from_deref(ctx, deref);
assert(hlsl_reg.allocated);
reg->type = VKD3DSPR_TEMP;
reg->type = deref->var->indexable ? VKD3DSPR_IDXTEMP : VKD3DSPR_TEMP;
reg->dimension = VSIR_DIMENSION_VEC4;
reg->idx[0].offset = hlsl_reg.id;
reg->idx_count = 1;
......@@ -4251,6 +4266,20 @@ static void write_sm4_dcl_temps(const struct tpf_writer *tpf, uint32_t temp_coun
write_sm4_instruction(tpf, &instr);
}
static void write_sm4_dcl_indexable_temp(const struct tpf_writer *tpf, uint32_t idx,
uint32_t size, uint32_t comp_count)
{
struct sm4_instruction instr =
{
.opcode = VKD3D_SM4_OP_DCL_INDEXABLE_TEMP,
.idx = {idx, size, comp_count},
.idx_count = 3,
};
write_sm4_instruction(tpf, &instr);
}
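
The three index operands are the x# register index, the temp's length in vec4 registers, and the component count. A standalone sketch of how write_sm4_shdr below derives the length operand from a variable's scalar register size, with align() reimplemented locally (values illustrative):

#include <stdio.h>

static unsigned int align_up(unsigned int value, unsigned int alignment)
{
    return (value + alignment - 1) & ~(alignment - 1);
}

int main(void)
{
    /* e.g. "float4 arr[3]" uses 12 scalar components -> dcl_indexableTemp x0[3], 4 */
    unsigned int reg_size = 12;
    unsigned int size = align_up(reg_size, 4) / 4;

    printf("dcl_indexableTemp x0[%u], 4\n", size);
    return 0;
}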
static void write_sm4_dcl_thread_group(const struct tpf_writer *tpf, const uint32_t thread_count[3])
{
struct sm4_instruction instr =
......@@ -5467,6 +5496,46 @@ static void write_sm4_store(const struct tpf_writer *tpf, const struct hlsl_ir_s
write_sm4_instruction(tpf, &instr);
}
static void write_sm4_switch(const struct tpf_writer *tpf, const struct hlsl_ir_switch *s)
{
const struct hlsl_ir_node *selector = s->selector.node;
struct hlsl_ir_switch_case *c;
struct sm4_instruction instr;
memset(&instr, 0, sizeof(instr));
instr.opcode = VKD3D_SM4_OP_SWITCH;
sm4_src_from_node(tpf, &instr.srcs[0], selector, VKD3DSP_WRITEMASK_ALL);
instr.src_count = 1;
write_sm4_instruction(tpf, &instr);
LIST_FOR_EACH_ENTRY(c, &s->cases, struct hlsl_ir_switch_case, entry)
{
memset(&instr, 0, sizeof(instr));
if (c->is_default)
{
instr.opcode = VKD3D_SM4_OP_DEFAULT;
}
else
{
struct hlsl_constant_value value = { .u[0].u = c->value };
instr.opcode = VKD3D_SM4_OP_CASE;
sm4_src_from_constant_value(&instr.srcs[0], &value, 1, VKD3DSP_WRITEMASK_ALL);
instr.src_count = 1;
}
write_sm4_instruction(tpf, &instr);
write_sm4_block(tpf, &c->body);
}
memset(&instr, 0, sizeof(instr));
instr.opcode = VKD3D_SM4_OP_ENDSWITCH;
write_sm4_instruction(tpf, &instr);
}
static void write_sm4_swizzle(const struct tpf_writer *tpf, const struct hlsl_ir_swizzle *swizzle)
{
unsigned int hlsl_swizzle;
......@@ -5554,6 +5623,10 @@ static void write_sm4_block(const struct tpf_writer *tpf, const struct hlsl_bloc
write_sm4_store(tpf, hlsl_ir_store(instr));
break;
case HLSL_IR_SWITCH:
write_sm4_switch(tpf, hlsl_ir_switch(instr));
break;
case HLSL_IR_SWIZZLE:
write_sm4_swizzle(tpf, hlsl_ir_swizzle(instr));
break;
......@@ -5572,6 +5645,7 @@ static void write_sm4_shdr(struct hlsl_ctx *ctx,
struct extern_resource *extern_resources;
unsigned int extern_resources_count, i;
const struct hlsl_buffer *cbuffer;
const struct hlsl_scope *scope;
const struct hlsl_ir_var *var;
size_t token_count_position;
struct tpf_writer tpf;
......@@ -5626,6 +5700,25 @@ static void write_sm4_shdr(struct hlsl_ctx *ctx,
if (ctx->temp_count)
write_sm4_dcl_temps(&tpf, ctx->temp_count);
LIST_FOR_EACH_ENTRY(scope, &ctx->scopes, struct hlsl_scope, entry)
{
LIST_FOR_EACH_ENTRY(var, &scope->vars, struct hlsl_ir_var, scope_entry)
{
if (var->is_uniform || var->is_input_semantic || var->is_output_semantic)
continue;
if (!var->regs[HLSL_REGSET_NUMERIC].allocated)
continue;
if (var->indexable)
{
unsigned int id = var->regs[HLSL_REGSET_NUMERIC].id;
unsigned int size = align(var->data_type->reg_size[HLSL_REGSET_NUMERIC], 4) / 4;
write_sm4_dcl_indexable_temp(&tpf, id, size, 4);
}
}
}
write_sm4_block(&tpf, &entry_func->body);
write_sm4_ret(&tpf);
......
......@@ -784,6 +784,9 @@ static struct vkd3d_shader_descriptor_info1 *vkd3d_shader_scan_add_descriptor(st
struct vkd3d_shader_scan_descriptor_info1 *info = context->scan_descriptor_info;
struct vkd3d_shader_descriptor_info1 *d;
if (!info)
return NULL;
if (!vkd3d_array_reserve((void **)&info->descriptors, &context->descriptors_size,
info->descriptor_count + 1, sizeof(*info->descriptors)))
{
......@@ -811,9 +814,6 @@ static void vkd3d_shader_scan_constant_buffer_declaration(struct vkd3d_shader_sc
const struct vkd3d_shader_constant_buffer *cb = &instruction->declaration.cb;
struct vkd3d_shader_descriptor_info1 *d;
if (!context->scan_descriptor_info)
return;
if (!(d = vkd3d_shader_scan_add_descriptor(context, VKD3D_SHADER_DESCRIPTOR_TYPE_CBV,
&cb->src.reg, &cb->range, VKD3D_SHADER_RESOURCE_BUFFER, VKD3D_SHADER_RESOURCE_DATA_UINT)))
return;
......@@ -826,9 +826,6 @@ static void vkd3d_shader_scan_sampler_declaration(struct vkd3d_shader_scan_conte
const struct vkd3d_shader_sampler *sampler = &instruction->declaration.sampler;
struct vkd3d_shader_descriptor_info1 *d;
if (!context->scan_descriptor_info)
return;
if (!(d = vkd3d_shader_scan_add_descriptor(context, VKD3D_SHADER_DESCRIPTOR_TYPE_SAMPLER,
&sampler->src.reg, &sampler->range, VKD3D_SHADER_RESOURCE_NONE, VKD3D_SHADER_RESOURCE_DATA_UINT)))
return;
......@@ -854,9 +851,6 @@ static void vkd3d_shader_scan_resource_declaration(struct vkd3d_shader_scan_cont
struct vkd3d_shader_descriptor_info1 *d;
enum vkd3d_shader_descriptor_type type;
if (!context->scan_descriptor_info)
return;
if (resource->reg.reg.type == VKD3DSPR_UAV)
type = VKD3D_SHADER_DESCRIPTOR_TYPE_UAV;
else
......
......@@ -142,6 +142,7 @@ enum vkd3d_shader_error
VKD3D_SHADER_ERROR_HLSL_RECURSIVE_CALL = 5025,
VKD3D_SHADER_ERROR_HLSL_INCONSISTENT_SAMPLER = 5026,
VKD3D_SHADER_ERROR_HLSL_NON_FINITE_RESULT = 5027,
VKD3D_SHADER_ERROR_HLSL_DUPLICATE_SWITCH_CASE = 5028,
VKD3D_SHADER_WARNING_HLSL_IMPLICIT_TRUNCATION = 5300,
VKD3D_SHADER_WARNING_HLSL_DIVISION_BY_ZERO = 5301,
......@@ -181,6 +182,7 @@ enum vkd3d_shader_error
VKD3D_SHADER_ERROR_DXIL_INVALID_SIGNATURE = 8016,
VKD3D_SHADER_ERROR_DXIL_INVALID_PROPERTIES = 8017,
VKD3D_SHADER_ERROR_DXIL_INVALID_RESOURCES = 8018,
VKD3D_SHADER_ERROR_DXIL_INVALID_RESOURCE_HANDLE = 8019,
VKD3D_SHADER_WARNING_DXIL_UNKNOWN_MAGIC_NUMBER = 8300,
VKD3D_SHADER_WARNING_DXIL_UNKNOWN_SHADER_TYPE = 8301,
......@@ -574,6 +576,7 @@ enum vkd3d_data_type
VKD3D_DATA_UNUSED,
VKD3D_DATA_UINT8,
VKD3D_DATA_UINT64,
VKD3D_DATA_BOOL,
};
static inline bool data_type_is_integer(enum vkd3d_data_type data_type)
......@@ -582,6 +585,11 @@ static inline bool data_type_is_integer(enum vkd3d_data_type data_type)
|| data_type == VKD3D_DATA_UINT64;
}
static inline bool data_type_is_bool(enum vkd3d_data_type data_type)
{
return data_type == VKD3D_DATA_BOOL;
}
enum vsir_dimension
{
VSIR_DIMENSION_NONE,
......@@ -915,6 +923,8 @@ struct shader_signature
unsigned int element_count;
};
struct signature_element *vsir_signature_find_element_for_reg(const struct shader_signature *signature,
unsigned int reg_idx, unsigned int write_mask);
void shader_signature_cleanup(struct shader_signature *signature);
struct vkd3d_shader_desc
......
......@@ -415,8 +415,11 @@ struct d3d12_resource_readback
ID3D12Resource *resource;
};
static void get_resource_readback_with_command_list(ID3D12Resource *resource, unsigned int sub_resource,
struct d3d12_resource_readback *rb, ID3D12CommandQueue *queue, ID3D12GraphicsCommandList *command_list)
#define RESOURCE_STATE_DO_NOT_CHANGE (~0u)
static void get_resource_readback_with_command_list_and_states(ID3D12Resource *resource, unsigned int sub_resource,
struct d3d12_resource_readback *rb, ID3D12CommandQueue *queue, ID3D12GraphicsCommandList *command_list,
D3D12_RESOURCE_STATES initial_state, D3D12_RESOURCE_STATES final_state)
{
D3D12_HEAP_PROPERTIES heap_properties;
D3D12_RESOURCE_DESC resource_desc;
......@@ -444,6 +447,9 @@ static void get_resource_readback_with_command_list(ID3D12Resource *resource, un
rb->rb.row_pitch = align(rb->rb.row_pitch, D3D12_TEXTURE_DATA_PITCH_ALIGNMENT);
rb->rb.data = NULL;
if (initial_state != RESOURCE_STATE_DO_NOT_CHANGE)
transition_sub_resource_state(command_list, resource, sub_resource, initial_state, D3D12_RESOURCE_STATE_COPY_SOURCE);
src_resource = resource;
if (resource_desc.Dimension != D3D12_RESOURCE_DIMENSION_BUFFER && resource_desc.SampleDesc.Count > 1)
{
......@@ -493,6 +499,10 @@ static void get_resource_readback_with_command_list(ID3D12Resource *resource, un
ID3D12GraphicsCommandList_CopyTextureRegion(command_list, &dst_location, 0, 0, 0, &src_location, NULL);
}
if (final_state != RESOURCE_STATE_DO_NOT_CHANGE)
transition_sub_resource_state(command_list, resource, sub_resource, D3D12_RESOURCE_STATE_COPY_SOURCE, final_state);
hr = ID3D12GraphicsCommandList_Close(command_list);
assert_that(hr == S_OK, "Failed to close command list, hr %#x.\n", hr);
......@@ -509,6 +519,13 @@ static void get_resource_readback_with_command_list(ID3D12Resource *resource, un
assert_that(hr == S_OK, "Failed to map readback buffer, hr %#x.\n", hr);
}
static void get_resource_readback_with_command_list(ID3D12Resource *resource, unsigned int sub_resource,
struct d3d12_resource_readback *rb, ID3D12CommandQueue *queue, ID3D12GraphicsCommandList *command_list)
{
return get_resource_readback_with_command_list_and_states(resource, sub_resource, rb, queue, command_list,
RESOURCE_STATE_DO_NOT_CHANGE, RESOURCE_STATE_DO_NOT_CHANGE);
}
static unsigned int get_readback_uint(struct resource_readback *rb,
unsigned int x, unsigned int y, unsigned int z)
{
......@@ -654,10 +671,11 @@ static void copy_sub_resource_data(const D3D12_MEMCPY_DEST *dst, const D3D12_SUB
}
}
#define upload_texture_data(a, b, c, d, e) upload_texture_data_(__LINE__, a, b, c, d, e)
static inline void upload_texture_data_(unsigned int line, ID3D12Resource *texture,
#define upload_texture_data_with_states(a, b, c, d, e, f, g) upload_texture_data_with_states_(__LINE__, a, b, c, d, e, f, g)
static inline void upload_texture_data_with_states_(unsigned int line, ID3D12Resource *texture,
const D3D12_SUBRESOURCE_DATA *data, unsigned int sub_resource_count,
ID3D12CommandQueue *queue, ID3D12GraphicsCommandList *command_list)
ID3D12CommandQueue *queue, ID3D12GraphicsCommandList *command_list,
D3D12_RESOURCE_STATES initial_state, D3D12_RESOURCE_STATES final_state)
{
D3D12_TEXTURE_COPY_LOCATION dst_location, src_location;
D3D12_PLACED_SUBRESOURCE_FOOTPRINT *layouts;
......@@ -701,7 +719,13 @@ static inline void upload_texture_data_(unsigned int line, ID3D12Resource *textu
if (resource_desc.Dimension == D3D12_RESOURCE_DIMENSION_BUFFER)
{
if (initial_state != RESOURCE_STATE_DO_NOT_CHANGE)
transition_resource_state(command_list, texture, initial_state, D3D12_RESOURCE_STATE_COPY_DEST);
ID3D12GraphicsCommandList_CopyResource(command_list, texture, upload_buffer);
if (final_state != RESOURCE_STATE_DO_NOT_CHANGE)
transition_resource_state(command_list, texture, D3D12_RESOURCE_STATE_COPY_DEST, final_state);
}
else
{
......@@ -715,8 +739,14 @@ static inline void upload_texture_data_(unsigned int line, ID3D12Resource *textu
src_location.Type = D3D12_TEXTURE_COPY_TYPE_PLACED_FOOTPRINT;
src_location.PlacedFootprint = layouts[i];
if (initial_state != RESOURCE_STATE_DO_NOT_CHANGE)
transition_sub_resource_state(command_list, texture, i, initial_state, D3D12_RESOURCE_STATE_COPY_DEST);
ID3D12GraphicsCommandList_CopyTextureRegion(command_list,
&dst_location, 0, 0, 0, &src_location, NULL);
if (final_state != RESOURCE_STATE_DO_NOT_CHANGE)
transition_sub_resource_state(command_list, texture, i, D3D12_RESOURCE_STATE_COPY_DEST, final_state);
}
}
......@@ -734,9 +764,19 @@ static inline void upload_texture_data_(unsigned int line, ID3D12Resource *textu
free(row_sizes);
}
#define upload_buffer_data(a, b, c, d, e, f) upload_buffer_data_(__LINE__, a, b, c, d, e, f)
static inline void upload_buffer_data_(unsigned int line, ID3D12Resource *buffer, size_t offset,
size_t size, const void *data, ID3D12CommandQueue *queue, ID3D12GraphicsCommandList *command_list)
#define upload_texture_data(a, b, c, d, e) upload_texture_data_(__LINE__, a, b, c, d, e)
static inline void upload_texture_data_(unsigned int line, ID3D12Resource *texture,
const D3D12_SUBRESOURCE_DATA *data, unsigned int sub_resource_count,
ID3D12CommandQueue *queue, ID3D12GraphicsCommandList *command_list)
{
return upload_texture_data_with_states_(line, texture, data, sub_resource_count, queue, command_list,
RESOURCE_STATE_DO_NOT_CHANGE, RESOURCE_STATE_DO_NOT_CHANGE);
}
#define upload_buffer_data_with_states(a, b, c, d, e, f, g, h) upload_buffer_data_with_states_(__LINE__, a, b, c, d, e, f, g, h)
static inline void upload_buffer_data_with_states_(unsigned int line, ID3D12Resource *buffer, size_t offset,
size_t size, const void *data, ID3D12CommandQueue *queue, ID3D12GraphicsCommandList *command_list,
D3D12_RESOURCE_STATES initial_state, D3D12_RESOURCE_STATES final_state)
{
ID3D12Resource *upload_buffer;
ID3D12Device *device;
......@@ -747,9 +787,15 @@ static inline void upload_buffer_data_(unsigned int line, ID3D12Resource *buffer
upload_buffer = create_upload_buffer_(line, device, size, data);
if (initial_state != RESOURCE_STATE_DO_NOT_CHANGE)
transition_resource_state(command_list, buffer, initial_state, D3D12_RESOURCE_STATE_COPY_DEST);
ID3D12GraphicsCommandList_CopyBufferRegion(command_list, buffer, offset,
upload_buffer, 0, size);
if (final_state != RESOURCE_STATE_DO_NOT_CHANGE)
transition_resource_state(command_list, buffer, D3D12_RESOURCE_STATE_COPY_DEST, final_state);
hr = ID3D12GraphicsCommandList_Close(command_list);
ok_(line)(SUCCEEDED(hr), "Failed to close command list, hr %#x.\n", hr);
exec_command_list(queue, command_list);
......@@ -759,6 +805,14 @@ static inline void upload_buffer_data_(unsigned int line, ID3D12Resource *buffer
ID3D12Device_Release(device);
}
#define upload_buffer_data(a, b, c, d, e, f) upload_buffer_data_(__LINE__, a, b, c, d, e, f)
static inline void upload_buffer_data_(unsigned int line, ID3D12Resource *buffer, size_t offset,
size_t size, const void *data, ID3D12CommandQueue *queue, ID3D12GraphicsCommandList *command_list)
{
return upload_buffer_data_with_states_(line, buffer, offset, size, data, queue, command_list,
RESOURCE_STATE_DO_NOT_CHANGE, RESOURCE_STATE_DO_NOT_CHANGE);
}
static HRESULT create_root_signature(ID3D12Device *device, const D3D12_ROOT_SIGNATURE_DESC *desc,
ID3D12RootSignature **root_signature)
{
......
/*
* Copyright 2023 Giovanni Mascellani for CodeWeavers
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
*/
#include <stdio.h>
#include <stdbool.h>
#include <windows.h>
#include <shlobj.h>
#define TIMEOUT_MS (10 * 1000)
#define MAX_TIMEOUT_COUNT 3
enum program_result
{
PROGRAM_RESULT_SUCCESS,
PROGRAM_RESULT_TIMEOUT,
PROGRAM_RESULT_FAILURE,
};
static enum program_result run_program(const char *cmdline, const char *log_filename)
{
char cmdline2[1024], log_dirname[1024], *file_part;
enum program_result ret = PROGRAM_RESULT_SUCCESS;
HANDLE log = INVALID_HANDLE_VALUE;
SECURITY_ATTRIBUTES attrs = {0};
PROCESS_INFORMATION info = {0};
DWORD exit_code, wait_result;
STARTUPINFOA startup = {0};
int res;
strcpy(cmdline2, cmdline);
if (GetFullPathNameA(log_filename, sizeof(log_dirname), log_dirname, &file_part) == 0)
{
fprintf(stderr, "Cannot extract the directory name for path %s, last error %ld.\n", log_filename, GetLastError());
ret = PROGRAM_RESULT_FAILURE;
goto out;
}
*file_part = '\0';
res = SHCreateDirectoryExA(NULL, log_dirname, NULL);
if (res != ERROR_SUCCESS && res != ERROR_ALREADY_EXISTS)
{
fprintf(stderr, "Cannot create log directory %s, error %d.\n", log_dirname, res);
ret = PROGRAM_RESULT_FAILURE;
goto out;
}
attrs.nLength = sizeof(attrs);
attrs.bInheritHandle = TRUE;
log = CreateFileA(log_filename, GENERIC_WRITE, 0, &attrs, CREATE_ALWAYS, FILE_ATTRIBUTE_NORMAL, NULL);
if (log == INVALID_HANDLE_VALUE)
{
fprintf(stderr, "Cannot create log file %s, last error %ld.\n", log_filename, GetLastError());
ret = PROGRAM_RESULT_FAILURE;
goto out;
}
startup.cb = sizeof(startup);
startup.dwFlags = STARTF_USESTDHANDLES;
startup.hStdInput = INVALID_HANDLE_VALUE;
startup.hStdOutput = log;
startup.hStdError = log;
if (!CreateProcessA(NULL, cmdline2, NULL, NULL, TRUE, 0, NULL, NULL, &startup, &info))
{
fprintf(stderr, "Cannot create process %s, last error %ld.\n", cmdline2, GetLastError());
ret = PROGRAM_RESULT_FAILURE;
goto out;
}
wait_result = WaitForSingleObject(info.hProcess, TIMEOUT_MS);
if (wait_result == WAIT_TIMEOUT)
{
fprintf(stderr, "Process timed out, terminating it.\n");
ret = PROGRAM_RESULT_TIMEOUT;
if (!TerminateProcess(info.hProcess, 1))
{
fprintf(stderr, "Cannot terminate process, last error %ld.\n", GetLastError());
goto out;
}
wait_result = WaitForSingleObject(info.hProcess, INFINITE);
}
if (wait_result != WAIT_OBJECT_0)
{
fprintf(stderr, "Cannot wait for process termination, last error %ld.\n", GetLastError());
ret = PROGRAM_RESULT_FAILURE;
goto out;
}
if (!GetExitCodeProcess(info.hProcess, &exit_code))
{
fprintf(stderr, "Cannot retrive the process exit code, last error %ld.\n", GetLastError());
ret = PROGRAM_RESULT_FAILURE;
goto out;
}
ret = exit_code == 0 ? PROGRAM_RESULT_SUCCESS : PROGRAM_RESULT_FAILURE;
printf("%s: %s\n", ret == PROGRAM_RESULT_SUCCESS ? "PASS" : "FAIL", cmdline);
out:
if (info.hProcess && !CloseHandle(info.hProcess))
fprintf(stderr, "Cannot close process, last error %ld.\n", GetLastError());
if (info.hThread && !CloseHandle(info.hThread))
fprintf(stderr, "Cannot close thread, last error %ld.\n", GetLastError());
if (log != INVALID_HANDLE_VALUE && !CloseHandle(log))
fprintf(stderr, "Cannot close log file, last error %ld.\n", GetLastError());
return ret;
}
static bool run_tests_for_directory(const char *commit_dir)
{
char cmdline[1024], log_filename[1024], list_filename[1024], line[1024];
unsigned int success_count = 0, test_count = 0, timeout_count = 0;
const char *test_arch = getenv("TEST_ARCH");
enum program_result result;
FILE *list_file;
bool ret = true;
if (!test_arch)
test_arch = "64";
printf("Building %s\n", commit_dir);
printf("---\n");
sprintf(list_filename, "artifacts/%s/tests/shader_tests.txt", commit_dir);
list_file = fopen(list_filename, "r");
if (!list_file)
{
fprintf(stderr, "Cannot open list file %s, errno %d.\n", list_filename, errno);
ret = false;
}
else
{
while (fgets(line, sizeof(line), list_file) && timeout_count < MAX_TIMEOUT_COUNT)
{
size_t len = strlen(line);
if (line[len - 1] == '\n')
line[--len] = '\0';
sprintf(cmdline, "artifacts/%s/tests/shader_runner.cross%s.exe %s", commit_dir, test_arch, line);
/* Remove the .shader_test suffix. */
line[len - 12] = '\0';
sprintf(log_filename, "artifacts/%s/%s.log", commit_dir, line);
++test_count;
result = run_program(cmdline, log_filename);
success_count += result == PROGRAM_RESULT_SUCCESS;
timeout_count += result == PROGRAM_RESULT_TIMEOUT;
}
fclose(list_file);
}
sprintf(list_filename, "artifacts/%s/tests/crosstests.txt", commit_dir);
list_file = fopen(list_filename, "r");
if (!list_file)
{
fprintf(stderr, "Cannot open list file %s, errno %d.\n", list_filename, errno);
ret = false;
}
else
{
while (fgets(line, sizeof(line), list_file) && timeout_count < MAX_TIMEOUT_COUNT)
{
size_t len = strlen(line);
if (line[len - 1] == '\n')
line[len - 1] = '\0';
sprintf(cmdline, "artifacts/%s/%s.cross%s.exe", commit_dir, line, test_arch);
sprintf(log_filename, "artifacts/%s/%s.log", commit_dir, line);
++test_count;
result = run_program(cmdline, log_filename);
success_count += result == PROGRAM_RESULT_SUCCESS;
timeout_count += result == PROGRAM_RESULT_TIMEOUT;
}
fclose(list_file);
}
if (timeout_count >= MAX_TIMEOUT_COUNT)
fprintf(stderr, "Too many timeouts, aborting tests.\n");
printf("=======\n");
printf("Summary\n");
printf("=======\n");
printf("# TOTAL: %u\n", test_count);
printf("# PASS: %u\n", success_count);
printf("# FAIL: %u\n", test_count - success_count);
if (test_count != success_count)
ret = false;
return ret;
}
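
The magic 12 above is simply strlen(".shader_test"). A quick standalone check of the suffix stripping (the test name is hypothetical):

#include <assert.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
    char name[64] = "tests/hlsl/switch.shader_test";

    assert(strlen(".shader_test") == 12);
    name[strlen(name) - 12] = '\0';
    printf("%s\n", name); /* tests/hlsl/switch */
    return 0;
}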
int wmain(void)
{
WIN32_FIND_DATAA find_data;
HANDLE find_handle;
bool ret = true;
SetErrorMode(SEM_FAILCRITICALERRORS | SEM_NOGPFAULTERRORBOX | SEM_NOOPENFILEERRORBOX);
find_handle = FindFirstFileA("artifacts/*-*", &find_data);
if (find_handle == INVALID_HANDLE_VALUE)
{
fprintf(stderr, "Cannot list commits, last error %ld.\n", GetLastError());
ret = false;
}
else
{
do
{
ret &= run_tests_for_directory(find_data.cFileName);
} while (FindNextFileA(find_handle, &find_data));
if (GetLastError() != ERROR_NO_MORE_FILES)
{
fprintf(stderr, "Cannot list tests, last error %ld.\n", GetLastError());
ret = false;
}
FindClose(find_handle);
}
return !ret;
}