Compare revisions

Changes are shown as if the source revision was being merged into the target revision.

Commits on Source (23)
Showing 645 additions and 479 deletions
......@@ -116,6 +116,7 @@ vkd3d_shader_tests = \
tests/hlsl-struct-array.shader_test \
tests/hlsl-struct-assignment.shader_test \
tests/hlsl-struct-semantics.shader_test \
tests/hlsl-ternary.shader_test \
tests/hlsl-transpose.shader_test \
tests/hlsl-type-names.shader_test \
tests/hlsl-vector-indexing.shader_test \
......
......@@ -91,7 +91,7 @@ const char *debugstr_w(const WCHAR *wstr, size_t wchar_size);
#define FIXME_ONCE VKD3D_DBG_LOG_ONCE(FIXME, WARN)
#define VKD3D_DEBUG_ENV_NAME(name) const char *vkd3d_dbg_env_name = name
#define VKD3D_DEBUG_ENV_NAME(name) const char *const vkd3d_dbg_env_name = name
static inline const char *debugstr_guid(const GUID *guid)
{
......
......@@ -40,9 +40,9 @@
#define VKD3D_DEBUG_BUFFER_COUNT 64
#define VKD3D_DEBUG_BUFFER_SIZE 512
extern const char *vkd3d_dbg_env_name;
extern const char *const vkd3d_dbg_env_name;
static const char *debug_level_names[] =
static const char *const debug_level_names[] =
{
/* VKD3D_DBG_LEVEL_NONE */ "none",
/* VKD3D_DBG_LEVEL_ERR */ "err",
......
......@@ -207,7 +207,7 @@ struct vkd3d_sm1_opcode_info
struct vkd3d_shader_sm1_parser
{
const struct vkd3d_sm1_opcode_info *opcode_table;
const uint32_t *start, *end;
const uint32_t *start, *end, *ptr;
bool abort;
struct vkd3d_shader_parser p;
......@@ -671,7 +671,7 @@ static void shader_sm1_read_immconst(struct vkd3d_shader_sm1_parser *sm1, const
static void shader_sm1_read_comment(struct vkd3d_shader_sm1_parser *sm1)
{
const uint32_t **ptr = &sm1->p.ptr;
const uint32_t **ptr = &sm1->ptr;
const char *comment;
unsigned int size;
size_t remaining;
......@@ -738,13 +738,12 @@ static void shader_sm1_validate_instruction(struct vkd3d_shader_sm1_parser *sm1,
}
}
static void shader_sm1_read_instruction(struct vkd3d_shader_parser *parser, struct vkd3d_shader_instruction *ins)
static void shader_sm1_read_instruction(struct vkd3d_shader_sm1_parser *sm1, struct vkd3d_shader_instruction *ins)
{
struct vkd3d_shader_sm1_parser *sm1 = vkd3d_shader_sm1_parser(parser);
struct vkd3d_shader_src_param *src_params, *predicate;
const struct vkd3d_sm1_opcode_info *opcode_info;
struct vkd3d_shader_dst_param *dst_param;
const uint32_t **ptr = &parser->ptr;
const uint32_t **ptr = &sm1->ptr;
uint32_t opcode_token;
const uint32_t *p;
bool predicated;
......@@ -758,11 +757,11 @@ static void shader_sm1_read_instruction(struct vkd3d_shader_parser *parser, stru
goto fail;
}
++parser->location.line;
++sm1->p.location.line;
opcode_token = read_u32(ptr);
if (!(opcode_info = shader_sm1_get_opcode_info(sm1, opcode_token & VKD3D_SM1_OPCODE_MASK)))
{
vkd3d_shader_parser_error(parser, VKD3D_SHADER_ERROR_D3DBC_INVALID_OPCODE,
vkd3d_shader_parser_error(&sm1->p, VKD3D_SHADER_ERROR_D3DBC_INVALID_OPCODE,
"Invalid opcode %#x (token 0x%08x, shader version %u.%u).",
opcode_token & VKD3D_SM1_OPCODE_MASK, opcode_token,
sm1->p.shader_version.major, sm1->p.shader_version.minor);
......@@ -775,14 +774,14 @@ static void shader_sm1_read_instruction(struct vkd3d_shader_parser *parser, stru
ins->raw = false;
ins->structured = false;
predicated = !!(opcode_token & VKD3D_SM1_INSTRUCTION_PREDICATED);
ins->predicate = predicate = predicated ? shader_parser_get_src_params(parser, 1) : NULL;
ins->predicate = predicate = predicated ? shader_parser_get_src_params(&sm1->p, 1) : NULL;
ins->dst_count = opcode_info->dst_count;
ins->dst = dst_param = shader_parser_get_dst_params(parser, ins->dst_count);
ins->dst = dst_param = shader_parser_get_dst_params(&sm1->p, ins->dst_count);
ins->src_count = opcode_info->src_count;
ins->src = src_params = shader_parser_get_src_params(parser, ins->src_count);
ins->src = src_params = shader_parser_get_src_params(&sm1->p, ins->src_count);
if ((!predicate && predicated) || (!src_params && ins->src_count) || (!dst_param && ins->dst_count))
{
vkd3d_shader_parser_error(parser, VKD3D_SHADER_ERROR_D3DBC_OUT_OF_MEMORY, "Out of memory.");
vkd3d_shader_parser_error(&sm1->p, VKD3D_SHADER_ERROR_D3DBC_OUT_OF_MEMORY, "Out of memory.");
goto fail;
}
......@@ -852,10 +851,9 @@ fail:
*ptr = sm1->end;
}
static bool shader_sm1_is_end(struct vkd3d_shader_parser *parser)
static bool shader_sm1_is_end(struct vkd3d_shader_sm1_parser *sm1)
{
struct vkd3d_shader_sm1_parser *sm1 = vkd3d_shader_sm1_parser(parser);
const uint32_t **ptr = &parser->ptr;
const uint32_t **ptr = &sm1->ptr;
shader_sm1_read_comment(sm1);
......@@ -938,7 +936,7 @@ static enum vkd3d_result shader_sm1_init(struct vkd3d_shader_sm1_parser *sm1,
shader_desc = &sm1->p.shader_desc;
shader_desc->byte_code = code;
shader_desc->byte_code_size = code_size;
sm1->p.ptr = sm1->start;
sm1->ptr = sm1->start;
return VKD3D_OK;
}
......@@ -965,7 +963,7 @@ int vkd3d_shader_sm1_parser_create(const struct vkd3d_shader_compile_info *compi
}
instructions = &sm1->p.instructions;
while (!shader_sm1_is_end(&sm1->p))
while (!shader_sm1_is_end(sm1))
{
if (!shader_instruction_array_reserve(instructions, instructions->count + 1))
{
......@@ -975,7 +973,7 @@ int vkd3d_shader_sm1_parser_create(const struct vkd3d_shader_compile_info *compi
return VKD3D_ERROR_OUT_OF_MEMORY;
}
ins = &instructions->elements[instructions->count];
shader_sm1_read_instruction(&sm1->p, ins);
shader_sm1_read_instruction(sm1, ins);
if (ins->handler_idx == VKD3DSIH_INVALID)
{
......
......@@ -57,7 +57,7 @@ int vkd3d_shader_serialize_dxbc(size_t section_count, const struct vkd3d_shader_
put_u32(&buffer, TAG_DXBC);
checksum_position = bytecode_get_next_offset(&buffer);
checksum_position = bytecode_get_size(&buffer);
for (i = 0; i < 4; ++i)
put_u32(&buffer, 0);
......@@ -65,13 +65,13 @@ int vkd3d_shader_serialize_dxbc(size_t section_count, const struct vkd3d_shader_
size_position = put_u32(&buffer, 0);
put_u32(&buffer, section_count);
offsets_position = bytecode_get_next_offset(&buffer);
offsets_position = bytecode_get_size(&buffer);
for (i = 0; i < section_count; ++i)
put_u32(&buffer, 0);
for (i = 0; i < section_count; ++i)
{
set_u32(&buffer, offsets_position + i * sizeof(uint32_t), bytecode_get_next_offset(&buffer));
set_u32(&buffer, offsets_position + i * sizeof(uint32_t), bytecode_align(&buffer));
put_u32(&buffer, sections[i].tag);
put_u32(&buffer, sections[i].data.size);
bytecode_put_bytes(&buffer, sections[i].data.code, sections[i].data.size);
......@@ -103,7 +103,7 @@ struct vkd3d_shader_src_param_entry
struct vkd3d_shader_sm4_parser
{
const uint32_t *start, *end;
const uint32_t *start, *end, *ptr;
unsigned int output_map[MAX_REG_OUTPUT];
......@@ -1459,14 +1459,13 @@ static void shader_sm4_read_instruction_modifier(DWORD modifier, struct vkd3d_sh
}
}
static void shader_sm4_read_instruction(struct vkd3d_shader_parser *parser, struct vkd3d_shader_instruction *ins)
static void shader_sm4_read_instruction(struct vkd3d_shader_sm4_parser *sm4, struct vkd3d_shader_instruction *ins)
{
struct vkd3d_shader_sm4_parser *sm4 = vkd3d_shader_sm4_parser(parser);
const struct vkd3d_sm4_opcode_info *opcode_info;
uint32_t opcode_token, opcode, previous_token;
struct vkd3d_shader_dst_param *dst_params;
struct vkd3d_shader_src_param *src_params;
const uint32_t **ptr = &parser->ptr;
const uint32_t **ptr = &sm4->ptr;
unsigned int i, len;
size_t remaining;
const uint32_t *p;
......@@ -1479,7 +1478,7 @@ static void shader_sm4_read_instruction(struct vkd3d_shader_parser *parser, stru
}
remaining = sm4->end - *ptr;
++parser->location.line;
++sm4->p.location.line;
opcode_token = *(*ptr)++;
opcode = opcode_token & VKD3D_SM4_OPCODE_MASK;
......@@ -1517,11 +1516,11 @@ static void shader_sm4_read_instruction(struct vkd3d_shader_parser *parser, stru
ins->predicate = NULL;
ins->dst_count = strnlen(opcode_info->dst_info, SM4_MAX_DST_COUNT);
ins->src_count = strnlen(opcode_info->src_info, SM4_MAX_SRC_COUNT);
ins->src = src_params = shader_parser_get_src_params(parser, ins->src_count);
ins->src = src_params = shader_parser_get_src_params(&sm4->p, ins->src_count);
if (!src_params && ins->src_count)
{
ERR("Failed to allocate src parameters.\n");
vkd3d_shader_parser_error(parser, VKD3D_SHADER_ERROR_TPF_OUT_OF_MEMORY, "Out of memory.");
vkd3d_shader_parser_error(&sm4->p, VKD3D_SHADER_ERROR_TPF_OUT_OF_MEMORY, "Out of memory.");
ins->handler_idx = VKD3DSIH_INVALID;
return;
}
......@@ -1559,11 +1558,11 @@ static void shader_sm4_read_instruction(struct vkd3d_shader_parser *parser, stru
precise = (opcode_token & VKD3D_SM5_PRECISE_MASK) >> VKD3D_SM5_PRECISE_SHIFT;
ins->flags |= precise << VKD3DSI_PRECISE_SHIFT;
ins->dst = dst_params = shader_parser_get_dst_params(parser, ins->dst_count);
ins->dst = dst_params = shader_parser_get_dst_params(&sm4->p, ins->dst_count);
if (!dst_params && ins->dst_count)
{
ERR("Failed to allocate dst parameters.\n");
vkd3d_shader_parser_error(parser, VKD3D_SHADER_ERROR_TPF_OUT_OF_MEMORY, "Out of memory.");
vkd3d_shader_parser_error(&sm4->p, VKD3D_SHADER_ERROR_TPF_OUT_OF_MEMORY, "Out of memory.");
ins->handler_idx = VKD3DSIH_INVALID;
return;
}
......@@ -1597,13 +1596,6 @@ fail:
return;
}
static bool shader_sm4_is_end(struct vkd3d_shader_parser *parser)
{
struct vkd3d_shader_sm4_parser *sm4 = vkd3d_shader_sm4_parser(parser);
return parser->ptr == sm4->end;
}
static const struct vkd3d_shader_parser_ops shader_sm4_parser_ops =
{
.parser_destroy = shader_sm4_destroy,
......@@ -1673,7 +1665,7 @@ static bool shader_sm4_init(struct vkd3d_shader_sm4_parser *sm4, const uint32_t
if (!vkd3d_shader_parser_init(&sm4->p, message_context, source_name, &version, &shader_sm4_parser_ops,
token_count / 7u + 20))
return false;
sm4->p.ptr = sm4->start;
sm4->ptr = sm4->start;
memset(sm4->output_map, 0xff, sizeof(sm4->output_map));
for (i = 0; i < output_signature->element_count; ++i)
......@@ -2185,7 +2177,7 @@ int vkd3d_shader_sm4_parser_create(const struct vkd3d_shader_compile_info *compi
}
instructions = &sm4->p.instructions;
while (!shader_sm4_is_end(&sm4->p))
while (sm4->ptr != sm4->end)
{
if (!shader_instruction_array_reserve(instructions, instructions->count + 1))
{
......@@ -2195,7 +2187,7 @@ int vkd3d_shader_sm4_parser_create(const struct vkd3d_shader_compile_info *compi
return VKD3D_ERROR_OUT_OF_MEMORY;
}
ins = &instructions->elements[instructions->count];
shader_sm4_read_instruction(&sm4->p, ins);
shader_sm4_read_instruction(sm4, ins);
if (ins->handler_idx == VKD3DSIH_INVALID)
{
......@@ -2754,7 +2746,7 @@ struct root_signature_writer_context
static size_t get_chunk_offset(struct root_signature_writer_context *context)
{
return bytecode_get_next_offset(&context->buffer) - context->chunk_position;
return bytecode_get_size(&context->buffer) - context->chunk_position;
}
static void shader_write_root_signature_header(struct root_signature_writer_context *context)
......@@ -2770,10 +2762,10 @@ static void shader_write_root_signature_header(struct root_signature_writer_cont
put_u32(buffer, 1);
context->total_size_position = put_u32(buffer, 0xffffffff);
put_u32(buffer, 1); /* chunk count */
put_u32(buffer, bytecode_get_next_offset(buffer) + sizeof(uint32_t)); /* chunk offset */
put_u32(buffer, bytecode_get_size(buffer) + sizeof(uint32_t)); /* chunk offset */
put_u32(buffer, TAG_RTS0);
put_u32(buffer, 0xffffffff);
context->chunk_position = bytecode_get_next_offset(buffer);
context->chunk_position = bytecode_get_size(buffer);
}
static void shader_write_descriptor_ranges(struct vkd3d_bytecode_buffer *buffer,
......@@ -2862,7 +2854,7 @@ static int shader_write_root_parameters(struct root_signature_writer_context *co
size_t parameters_position;
unsigned int i;
parameters_position = bytecode_get_next_offset(buffer);
parameters_position = bytecode_align(buffer);
for (i = 0; i < parameter_count; ++i)
{
put_u32(buffer, versioned_root_signature_get_parameter_type(desc, i));
......
......@@ -461,7 +461,7 @@ static bool init_deref_from_component_index(struct hlsl_ctx *ctx, struct hlsl_bl
struct hlsl_type *path_type;
struct hlsl_ir_constant *c;
list_init(&block->instrs);
hlsl_block_init(block);
path_len = 0;
path_type = hlsl_deref_get_type(ctx, prefix);
......@@ -490,7 +490,7 @@ static bool init_deref_from_component_index(struct hlsl_ctx *ctx, struct hlsl_bl
hlsl_free_instr_list(&block->instrs);
return false;
}
list_add_tail(&block->instrs, &c->node.entry);
hlsl_block_add_instr(block, &c->node);
hlsl_src_from_node(&deref->path[deref_path_len++], &c->node);
}
......@@ -1012,7 +1012,7 @@ struct hlsl_ir_store *hlsl_new_store_component(struct hlsl_ctx *ctx, struct hlsl
struct hlsl_block comp_path_block;
struct hlsl_ir_store *store;
list_init(&block->instrs);
hlsl_block_init(block);
if (!(store = hlsl_alloc(ctx, sizeof(*store))))
return NULL;
......@@ -1023,13 +1023,13 @@ struct hlsl_ir_store *hlsl_new_store_component(struct hlsl_ctx *ctx, struct hlsl
vkd3d_free(store);
return NULL;
}
list_move_tail(&block->instrs, &comp_path_block.instrs);
hlsl_block_add_block(block, &comp_path_block);
hlsl_src_from_node(&store->rhs, rhs);
if (type_is_single_reg(rhs->data_type))
store->writemask = (1 << rhs->data_type->dimx) - 1;
list_add_tail(&block->instrs, &store->node.entry);
hlsl_block_add_instr(block, &store->node);
return store;
}
......@@ -1150,8 +1150,8 @@ struct hlsl_ir_if *hlsl_new_if(struct hlsl_ctx *ctx, struct hlsl_ir_node *condit
return NULL;
init_node(&iff->node, HLSL_IR_IF, NULL, &loc);
hlsl_src_from_node(&iff->condition, condition);
list_init(&iff->then_instrs.instrs);
list_init(&iff->else_instrs.instrs);
hlsl_block_init(&iff->then_instrs);
hlsl_block_init(&iff->else_instrs);
return iff;
}
......@@ -1201,7 +1201,7 @@ struct hlsl_ir_load *hlsl_new_load_component(struct hlsl_ctx *ctx, struct hlsl_b
struct hlsl_block comp_path_block;
struct hlsl_ir_load *load;
list_init(&block->instrs);
hlsl_block_init(block);
if (!(load = hlsl_alloc(ctx, sizeof(*load))))
return NULL;
......@@ -1215,9 +1215,9 @@ struct hlsl_ir_load *hlsl_new_load_component(struct hlsl_ctx *ctx, struct hlsl_b
vkd3d_free(load);
return NULL;
}
list_move_tail(&block->instrs, &comp_path_block.instrs);
hlsl_block_add_block(block, &comp_path_block);
list_add_tail(&block->instrs, &load->node.entry);
hlsl_block_add_instr(block, &load->node);
return load;
}
......@@ -1298,7 +1298,7 @@ struct hlsl_ir_loop *hlsl_new_loop(struct hlsl_ctx *ctx, struct vkd3d_shader_loc
if (!(loop = hlsl_alloc(ctx, sizeof(*loop))))
return NULL;
init_node(&loop->node, HLSL_IR_LOOP, NULL, &loc);
list_init(&loop->body.instrs);
hlsl_block_init(&loop->body);
return loop;
}
......@@ -1601,7 +1601,7 @@ struct hlsl_ir_function_decl *hlsl_new_func_decl(struct hlsl_ctx *ctx,
if (!(decl = hlsl_alloc(ctx, sizeof(*decl))))
return NULL;
list_init(&decl->body.instrs);
hlsl_block_init(&decl->body);
decl->return_type = return_type;
decl->parameters = *parameters;
decl->loc = *loc;
......@@ -1622,11 +1622,11 @@ struct hlsl_ir_function_decl *hlsl_new_func_decl(struct hlsl_ctx *ctx,
if (!(constant = hlsl_new_bool_constant(ctx, false, loc)))
return decl;
list_add_tail(&decl->body.instrs, &constant->node.entry);
hlsl_block_add_instr(&decl->body, &constant->node);
if (!(store = hlsl_new_simple_store(ctx, decl->early_return_var, &constant->node)))
return decl;
list_add_tail(&decl->body.instrs, &store->node.entry);
hlsl_block_add_instr(&decl->body, &store->node);
return decl;
}
......
......@@ -857,6 +857,21 @@ static inline struct hlsl_ir_swizzle *hlsl_ir_swizzle(const struct hlsl_ir_node
return CONTAINING_RECORD(node, struct hlsl_ir_swizzle, node);
}
static inline void hlsl_block_init(struct hlsl_block *block)
{
list_init(&block->instrs);
}
static inline void hlsl_block_add_instr(struct hlsl_block *block, struct hlsl_ir_node *instr)
{
list_add_tail(&block->instrs, &instr->entry);
}
static inline void hlsl_block_add_block(struct hlsl_block *block, struct hlsl_block *add)
{
list_move_tail(&block->instrs, &add->instrs);
}
static inline void hlsl_src_from_node(struct hlsl_src *src, struct hlsl_ir_node *node)
{
src->node = node;
......
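Editor's note: the three inline helpers above wrap the list operations that the rest of the series converts away from open-coded calls. A minimal usage sketch, assuming the vkd3d-shader HLSL internals shown in this diff (the function and its arguments are illustrative only):

    static void block_helpers_sketch(struct hlsl_block *block,
            struct hlsl_ir_node *instr, struct hlsl_block *other)
    {
        /* Replaces list_init(&block->instrs). */
        hlsl_block_init(block);

        /* Replaces list_add_tail(&block->instrs, &instr->entry). */
        hlsl_block_add_instr(block, instr);

        /* Replaces list_move_tail(&block->instrs, &other->instrs); "other"
         * is left empty afterwards. */
        hlsl_block_add_block(block, other);
    }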
......@@ -425,7 +425,7 @@ static bool append_conditional_break(struct hlsl_ctx *ctx, struct list *cond_lis
if (!(jump = hlsl_new_jump(ctx, HLSL_IR_JUMP_BREAK, condition->loc)))
return false;
list_add_head(&iff->then_instrs.instrs, &jump->node.entry);
hlsl_block_add_instr(&iff->then_instrs, &jump->node);
return true;
}
......@@ -5743,7 +5743,16 @@ conditional_expr:
logicor_expr
| logicor_expr '?' expr ':' assignment_expr
{
hlsl_fixme(ctx, &@$, "Ternary operator.");
struct hlsl_ir_node *cond = node_from_list($1), *first = node_from_list($3), *second = node_from_list($5);
list_move_tail($1, $3);
list_move_tail($1, $5);
vkd3d_free($3);
vkd3d_free($5);
if (!hlsl_add_conditional(ctx, $1, cond, first, second))
YYABORT;
$$ = $1;
}
assignment_expr:
......
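Editor's note: the new grammar action evaluates both operand expressions (their instruction lists are appended to the condition's list) and then selects the result through hlsl_add_conditional(), which is updated later in this diff to store each branch into a synthesized variable inside an if/else and load it back. A hedged C sketch of the equivalent computation for scalar floats (illustrative only, not the actual IR):

    static float ternary_lowering_sketch(bool cond, float first, float second)
    {
        float tmp;

        /* Both "first" and "second" have already been computed at this
         * point; only the store that reaches "tmp" depends on the
         * condition. */
        if (cond)
            tmp = first;
        else
            tmp = second;
        return tmp;
    }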
......@@ -29,7 +29,7 @@ static struct hlsl_ir_node *new_offset_from_path_index(struct hlsl_ctx *ctx, str
struct hlsl_ir_node *idx_offset = NULL;
struct hlsl_ir_constant *c;
list_init(&block->instrs);
hlsl_block_init(block);
switch (type->class)
{
......@@ -41,11 +41,11 @@ static struct hlsl_ir_node *new_offset_from_path_index(struct hlsl_ctx *ctx, str
{
if (!(c = hlsl_new_uint_constant(ctx, 4, loc)))
return NULL;
list_add_tail(&block->instrs, &c->node.entry);
hlsl_block_add_instr(block, &c->node);
if (!(idx_offset = hlsl_new_binary_expr(ctx, HLSL_OP2_MUL, &c->node, idx)))
return NULL;
list_add_tail(&block->instrs, &idx_offset->entry);
hlsl_block_add_instr(block, idx_offset);
break;
}
......@@ -56,11 +56,11 @@ static struct hlsl_ir_node *new_offset_from_path_index(struct hlsl_ctx *ctx, str
if (!(c = hlsl_new_uint_constant(ctx, size, loc)))
return NULL;
list_add_tail(&block->instrs, &c->node.entry);
hlsl_block_add_instr(block, &c->node);
if (!(idx_offset = hlsl_new_binary_expr(ctx, HLSL_OP2_MUL, &c->node, idx)))
return NULL;
list_add_tail(&block->instrs, &idx_offset->entry);
hlsl_block_add_instr(block, idx_offset);
break;
}
......@@ -72,7 +72,7 @@ static struct hlsl_ir_node *new_offset_from_path_index(struct hlsl_ctx *ctx, str
if (!(c = hlsl_new_uint_constant(ctx, field->reg_offset[regset], loc)))
return NULL;
list_add_tail(&block->instrs, &c->node.entry);
hlsl_block_add_instr(block, &c->node);
idx_offset = &c->node;
......@@ -87,7 +87,7 @@ static struct hlsl_ir_node *new_offset_from_path_index(struct hlsl_ctx *ctx, str
{
if (!(idx_offset = hlsl_new_binary_expr(ctx, HLSL_OP2_ADD, offset, idx_offset)))
return NULL;
list_add_tail(&block->instrs, &idx_offset->entry);
hlsl_block_add_instr(block, idx_offset);
}
return idx_offset;
......@@ -101,7 +101,7 @@ static struct hlsl_ir_node *new_offset_instr_from_deref(struct hlsl_ctx *ctx, st
struct hlsl_type *type;
unsigned int i;
list_init(&block->instrs);
hlsl_block_init(block);
assert(deref->var);
type = deref->var->data_type;
......@@ -114,7 +114,7 @@ static struct hlsl_ir_node *new_offset_instr_from_deref(struct hlsl_ctx *ctx, st
deref->offset_regset, loc)))
return NULL;
list_move_tail(&block->instrs, &idx_block.instrs);
hlsl_block_add_block(block, &idx_block);
type = hlsl_get_element_type_from_path_index(ctx, type, deref->path[i].node);
}
......@@ -530,7 +530,7 @@ static void insert_early_return_break(struct hlsl_ctx *ctx,
if (!(jump = hlsl_new_jump(ctx, HLSL_IR_JUMP_BREAK, cf_instr->loc)))
return;
list_add_tail(&iff->then_instrs.instrs, &jump->node.entry);
hlsl_block_add_instr(&iff->then_instrs, &jump->node);
}
/* Remove HLSL_IR_JUMP_RETURN calls by altering subsequent control flow. */
......@@ -687,15 +687,15 @@ static bool lower_return(struct hlsl_ctx *ctx, struct hlsl_ir_function_decl *fun
if (!(load = hlsl_new_var_load(ctx, func->early_return_var, cf_instr->loc)))
return false;
list_add_tail(&block->instrs, &load->node.entry);
hlsl_block_add_instr(block, &load->node);
if (!(not = hlsl_new_unary_expr(ctx, HLSL_OP1_LOGIC_NOT, &load->node, cf_instr->loc)))
return false;
list_add_tail(&block->instrs, &not->entry);
hlsl_block_add_instr(block, not);
if (!(iff = hlsl_new_if(ctx, not, cf_instr->loc)))
return false;
list_add_tail(&block->instrs, &iff->node.entry);
hlsl_block_add_instr(block, &iff->node);
list_move_slice_tail(&iff->then_instrs.instrs, list_next(&block->instrs, &cf_instr->entry), tail);
......@@ -721,7 +721,7 @@ static bool lower_calls(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr, void *
hlsl_error(ctx, &call->node.loc, VKD3D_SHADER_ERROR_HLSL_NOT_DEFINED,
"Function \"%s\" is not defined.", decl->func->name);
list_init(&block.instrs);
hlsl_block_init(&block);
if (!hlsl_clone_block(ctx, &block, &decl->body))
return false;
list_move_before(&call->node.entry, &block.instrs);
......@@ -1943,11 +1943,11 @@ struct hlsl_ir_load *hlsl_add_conditional(struct hlsl_ctx *ctx, struct list *ins
if (!(store = hlsl_new_simple_store(ctx, var, if_true)))
return NULL;
list_add_tail(&iff->then_instrs.instrs, &store->node.entry);
hlsl_block_add_instr(&iff->then_instrs, &store->node);
if (!(store = hlsl_new_simple_store(ctx, var, if_false)))
return NULL;
list_add_tail(&iff->else_instrs.instrs, &store->node.entry);
hlsl_block_add_instr(&iff->else_instrs, &store->node);
if (!(load = hlsl_new_var_load(ctx, var, condition->loc)))
return NULL;
......@@ -2816,7 +2816,7 @@ static void allocate_temp_registers(struct hlsl_ctx *ctx, struct hlsl_ir_functio
static void allocate_semantic_register(struct hlsl_ctx *ctx, struct hlsl_ir_var *var, unsigned int *counter, bool output)
{
static const char *shader_names[] =
static const char *const shader_names[] =
{
[VKD3D_SHADER_TYPE_PIXEL] = "Pixel",
[VKD3D_SHADER_TYPE_VERTEX] = "Vertex",
......
......@@ -261,7 +261,7 @@ static void write_sm1_type(struct vkd3d_bytecode_buffer *buffer, struct hlsl_typ
write_sm1_type(buffer, field->type, ctab_start);
}
fields_offset = bytecode_get_next_offset(buffer) - ctab_start;
fields_offset = bytecode_align(buffer) - ctab_start;
for (i = 0; i < field_count; ++i)
{
......@@ -351,7 +351,7 @@ static void write_sm1_uniforms(struct hlsl_ctx *ctx, struct vkd3d_bytecode_buffe
put_u32(buffer, 0); /* FIXME: flags */
put_u32(buffer, 0); /* FIXME: target string */
vars_start = bytecode_get_next_offset(buffer);
vars_start = bytecode_align(buffer);
LIST_FOR_EACH_ENTRY(var, &ctx->extern_vars, struct hlsl_ir_var, extern_entry)
{
......@@ -402,7 +402,7 @@ static void write_sm1_uniforms(struct hlsl_ctx *ctx, struct vkd3d_bytecode_buffe
offset = put_string(buffer, vkd3d_shader_get_version(NULL, NULL));
set_u32(buffer, creator_offset, offset - ctab_start);
ctab_end = bytecode_get_next_offset(buffer);
ctab_end = bytecode_align(buffer);
set_u32(buffer, size_offset, vkd3d_make_u32(D3DSIO_COMMENT, (ctab_end - ctab_offset) / sizeof(uint32_t)));
}
......
......@@ -147,6 +147,16 @@ bool hlsl_sm4_usage_from_semantic(struct hlsl_ctx *ctx, const struct hlsl_semant
return true;
}
static void add_section(struct dxbc_writer *dxbc, uint32_t tag, struct vkd3d_bytecode_buffer *buffer)
{
/* Native D3DDisassemble() expects at least the sizes of the ISGN and OSGN
* sections to be aligned. Without this, the sections themselves will be
* aligned, but their reported sizes won't. */
size_t size = bytecode_align(buffer);
dxbc_writer_add_section(dxbc, tag, buffer->data, size);
}
static void write_sm4_signature(struct hlsl_ctx *ctx, struct dxbc_writer *dxbc, bool output)
{
struct vkd3d_bytecode_buffer buffer = {0};
......@@ -252,7 +262,7 @@ static void write_sm4_signature(struct hlsl_ctx *ctx, struct dxbc_writer *dxbc,
set_u32(&buffer, count_position, i);
dxbc_writer_add_section(dxbc, output ? TAG_OSGN : TAG_ISGN, buffer.data, buffer.size);
add_section(dxbc, output ? TAG_OSGN : TAG_ISGN, &buffer);
}
static const struct hlsl_type *get_array_type(const struct hlsl_type *type)
......@@ -387,7 +397,7 @@ static void write_sm4_type(struct hlsl_ctx *ctx, struct vkd3d_bytecode_buffer *b
write_sm4_type(ctx, buffer, field->type);
}
fields_offset = bytecode_get_next_offset(buffer);
fields_offset = bytecode_align(buffer);
for (i = 0; i < field_count; ++i)
{
......@@ -583,7 +593,7 @@ static void write_sm4_rdef(struct hlsl_ctx *ctx, struct dxbc_writer *dxbc)
/* Bound resources. */
resources_offset = bytecode_get_next_offset(&buffer);
resources_offset = bytecode_align(&buffer);
set_u32(&buffer, resource_position, resources_offset);
for (i = 0; i < extern_resources_count; ++i)
......@@ -656,7 +666,7 @@ static void write_sm4_rdef(struct hlsl_ctx *ctx, struct dxbc_writer *dxbc)
/* Buffers. */
cbuffers_offset = bytecode_get_next_offset(&buffer);
cbuffers_offset = bytecode_align(&buffer);
set_u32(&buffer, cbuffer_position, cbuffers_offset);
LIST_FOR_EACH_ENTRY(cbuffer, &ctx->buffers, struct hlsl_buffer, entry)
{
......@@ -692,7 +702,7 @@ static void write_sm4_rdef(struct hlsl_ctx *ctx, struct dxbc_writer *dxbc)
i = 0;
LIST_FOR_EACH_ENTRY(cbuffer, &ctx->buffers, struct hlsl_buffer, entry)
{
size_t vars_start = bytecode_get_next_offset(&buffer);
size_t vars_start = bytecode_align(&buffer);
if (!cbuffer->reg.allocated)
continue;
......@@ -745,7 +755,7 @@ static void write_sm4_rdef(struct hlsl_ctx *ctx, struct dxbc_writer *dxbc)
creator_offset = put_string(&buffer, vkd3d_shader_get_version(NULL, NULL));
set_u32(&buffer, creator_position, creator_offset);
dxbc_writer_add_section(dxbc, TAG_RDEF, buffer.data, buffer.size);
add_section(dxbc, TAG_RDEF, &buffer);
vkd3d_free(extern_resources);
}
......@@ -1581,6 +1591,7 @@ static void write_sm4_cast(struct hlsl_ctx *ctx,
switch (dst_type->base_type)
{
case HLSL_TYPE_HALF:
case HLSL_TYPE_FLOAT:
switch (src_type->base_type)
{
......@@ -1662,10 +1673,6 @@ static void write_sm4_cast(struct hlsl_ctx *ctx,
}
break;
case HLSL_TYPE_HALF:
hlsl_fixme(ctx, &expr->node.loc, "SM4 cast to half.");
break;
case HLSL_TYPE_DOUBLE:
hlsl_fixme(ctx, &expr->node.loc, "SM4 cast to double.");
break;
......@@ -2505,7 +2512,7 @@ static void write_sm4_shdr(struct hlsl_ctx *ctx,
set_u32(&buffer, token_count_position, bytecode_get_size(&buffer) / sizeof(uint32_t));
dxbc_writer_add_section(dxbc, TAG_SHDR, buffer.data, buffer.size);
add_section(dxbc, TAG_SHDR, &buffer);
vkd3d_free(extern_resources);
}
......
......@@ -84,6 +84,11 @@ static void shader_instruction_eliminate_phase_instance_id(struct vkd3d_shader_i
shader_register_eliminate_phase_addressing((struct vkd3d_shader_register *)&ins->dst[i].reg, instance_id);
}
static bool normaliser_is_in_control_point_phase(const struct vkd3d_shader_normaliser *normaliser)
{
return normaliser->phase == VKD3DSIH_HS_CONTROL_POINT_PHASE;
}
static bool normaliser_is_in_fork_or_join_phase(const struct vkd3d_shader_normaliser *normaliser)
{
return normaliser->phase == VKD3DSIH_HS_FORK_PHASE || normaliser->phase == VKD3DSIH_HS_JOIN_PHASE;
......@@ -217,6 +222,22 @@ static enum vkd3d_result shader_normaliser_flatten_phases(struct vkd3d_shader_no
return VKD3D_OK;
}
static void shader_register_init(struct vkd3d_shader_register *reg,
enum vkd3d_shader_register_type reg_type, enum vkd3d_data_type data_type)
{
reg->type = reg_type;
reg->precision = VKD3D_SHADER_REGISTER_PRECISION_DEFAULT;
reg->non_uniform = false;
reg->data_type = data_type;
reg->idx[0].offset = ~0u;
reg->idx[0].rel_addr = NULL;
reg->idx[1].offset = ~0u;
reg->idx[1].rel_addr = NULL;
reg->idx[2].offset = ~0u;
reg->idx[2].rel_addr = NULL;
reg->immconst_type = VKD3D_IMMCONST_SCALAR;
}
static void shader_instruction_init(struct vkd3d_shader_instruction *ins, enum vkd3d_shader_opcode handler_idx)
{
memset(ins, 0, sizeof(*ins));
......@@ -258,6 +279,170 @@ enum vkd3d_result shader_normaliser_flatten_hull_shader_phases(struct vkd3d_shad
return result;
}
static struct vkd3d_shader_src_param *shader_normaliser_create_outpointid_param(struct vkd3d_shader_normaliser *normaliser)
{
struct vkd3d_shader_src_param *rel_addr;
if (!(rel_addr = shader_src_param_allocator_get(&normaliser->instructions.src_params, 1)))
return NULL;
shader_register_init(&rel_addr->reg, VKD3DSPR_OUTPOINTID, VKD3D_DATA_UINT);
rel_addr->swizzle = 0;
rel_addr->modifiers = 0;
return rel_addr;
}
static bool shader_dst_param_normalise_outpointid(struct vkd3d_shader_dst_param *dst_param,
struct vkd3d_shader_normaliser *normaliser)
{
struct vkd3d_shader_register *reg = &dst_param->reg;
if (normaliser_is_in_control_point_phase(normaliser) && reg->type == VKD3DSPR_OUTPUT)
{
if (reg->idx[2].offset != ~0u)
{
FIXME("Cannot insert phase id.\n");
return false;
}
if (reg->idx[1].offset != ~0u)
{
WARN("Unexpected address at index 1.\n");
reg->idx[2] = reg->idx[1];
}
reg->idx[1] = reg->idx[0];
/* The control point id param is implicit here. Avoid later complications by inserting it. */
reg->idx[0].offset = 0;
reg->idx[0].rel_addr = normaliser->outpointid_param;
}
return true;
}
static void shader_dst_param_io_init(struct vkd3d_shader_dst_param *param,
const struct vkd3d_shader_signature_element *e, enum vkd3d_shader_register_type reg_type)
{
param->write_mask = e->mask;
param->modifiers = 0;
param->shift = 0;
shader_register_init(&param->reg, reg_type, vkd3d_data_type_from_component_type(e->component_type));
}
static enum vkd3d_result shader_normaliser_emit_hs_input(struct vkd3d_shader_normaliser *normaliser,
const struct vkd3d_shader_signature *s, unsigned int input_control_point_count, unsigned int dst)
{
const struct vkd3d_shader_signature_element *e;
struct vkd3d_shader_instruction *ins;
struct vkd3d_shader_dst_param *param;
unsigned int i, count;
for (i = 0, count = 1; i < s->element_count; ++i)
count += !!s->elements[i].used_mask;
if (!shader_instruction_array_reserve(&normaliser->instructions, normaliser->instructions.count + count))
return VKD3D_ERROR_OUT_OF_MEMORY;
memmove(&normaliser->instructions.elements[dst + count], &normaliser->instructions.elements[dst],
(normaliser->instructions.count - dst) * sizeof(*normaliser->instructions.elements));
normaliser->instructions.count += count;
ins = &normaliser->instructions.elements[dst];
shader_instruction_init(ins, VKD3DSIH_HS_CONTROL_POINT_PHASE);
ins->flags = 1;
++ins;
for (i = 0; i < s->element_count; ++i)
{
e = &s->elements[i];
if (!e->used_mask)
continue;
if (e->sysval_semantic != VKD3D_SHADER_SV_NONE)
{
shader_instruction_init(ins, VKD3DSIH_DCL_INPUT_SIV);
param = &ins->declaration.register_semantic.reg;
ins->declaration.register_semantic.sysval_semantic = vkd3d_siv_from_sysval(e->sysval_semantic);
}
else
{
shader_instruction_init(ins, VKD3DSIH_DCL_INPUT);
param = &ins->declaration.dst;
}
shader_dst_param_io_init(param, e, VKD3DSPR_INPUT);
param->reg.idx[0].offset = input_control_point_count;
param->reg.idx[1].offset = i;
++ins;
}
return VKD3D_OK;
}
enum vkd3d_result shader_normaliser_normalise_hull_shader_control_point_io(struct vkd3d_shader_normaliser *normaliser,
const struct vkd3d_shader_signature *input_signature)
{
struct vkd3d_shader_instruction_array *instructions = &normaliser->instructions;
unsigned int input_control_point_count;
struct vkd3d_shader_instruction *ins;
unsigned int i, j;
if (!(normaliser->outpointid_param = shader_normaliser_create_outpointid_param(normaliser)))
{
ERR("Failed to allocate src param.\n");
return VKD3D_ERROR_OUT_OF_MEMORY;
}
normaliser->phase = VKD3DSIH_INVALID;
for (i = 0; i < normaliser->instructions.count; ++i)
{
ins = &instructions->elements[i];
switch (ins->handler_idx)
{
case VKD3DSIH_HS_CONTROL_POINT_PHASE:
case VKD3DSIH_HS_FORK_PHASE:
case VKD3DSIH_HS_JOIN_PHASE:
normaliser->phase = ins->handler_idx;
break;
default:
if (shader_instruction_is_dcl(ins))
break;
for (j = 0; j < ins->dst_count; ++j)
{
if (!shader_dst_param_normalise_outpointid((struct vkd3d_shader_dst_param *)&ins->dst[j],
normaliser))
return VKD3D_ERROR_INVALID_ARGUMENT;
}
break;
}
}
normaliser->phase = VKD3DSIH_INVALID;
input_control_point_count = 1;
for (i = 0; i < instructions->count; ++i)
{
ins = &instructions->elements[i];
switch (ins->handler_idx)
{
case VKD3DSIH_DCL_INPUT_CONTROL_POINT_COUNT:
input_control_point_count = ins->declaration.count;
break;
case VKD3DSIH_HS_CONTROL_POINT_PHASE:
return VKD3D_OK;
case VKD3DSIH_HS_FORK_PHASE:
case VKD3DSIH_HS_JOIN_PHASE:
return shader_normaliser_emit_hs_input(normaliser, input_signature, input_control_point_count, i);
default:
break;
}
}
return VKD3D_OK;
}
void shader_normaliser_destroy(struct vkd3d_shader_normaliser *normaliser)
{
shader_instruction_array_destroy(&normaliser->instructions);
......
This diff is collapsed.
......@@ -340,9 +340,24 @@ void vkd3d_shader_error(struct vkd3d_shader_message_context *context, const stru
va_end(args);
}
size_t bytecode_align(struct vkd3d_bytecode_buffer *buffer)
{
size_t aligned_size = align(buffer->size, 4);
if (!vkd3d_array_reserve((void **)&buffer->data, &buffer->capacity, aligned_size, 1))
{
buffer->status = VKD3D_ERROR_OUT_OF_MEMORY;
return aligned_size;
}
memset(buffer->data + buffer->size, 0xab, aligned_size - buffer->size);
buffer->size = aligned_size;
return aligned_size;
}
size_t bytecode_put_bytes(struct vkd3d_bytecode_buffer *buffer, const void *bytes, size_t size)
{
size_t offset = bytecode_get_next_offset(buffer);
size_t offset = bytecode_align(buffer);
if (buffer->status)
return offset;
......@@ -352,7 +367,6 @@ size_t bytecode_put_bytes(struct vkd3d_bytecode_buffer *buffer, const void *byte
buffer->status = VKD3D_ERROR_OUT_OF_MEMORY;
return offset;
}
memset(buffer->data + buffer->size, 0xab, offset - buffer->size);
memcpy(buffer->data + offset, bytes, size);
buffer->size = offset + size;
return offset;
......@@ -1168,75 +1182,73 @@ int vkd3d_shader_scan(const struct vkd3d_shader_compile_info *compile_info, char
return ret;
}
static int compile_dxbc_tpf(const struct vkd3d_shader_compile_info *compile_info,
static int vkd3d_shader_parser_compile(struct vkd3d_shader_parser *parser,
const struct vkd3d_shader_compile_info *compile_info,
struct vkd3d_shader_code *out, struct vkd3d_shader_message_context *message_context)
{
struct vkd3d_shader_scan_descriptor_info scan_descriptor_info;
struct vkd3d_glsl_generator *glsl_generator;
struct vkd3d_shader_compile_info scan_info;
struct spirv_compiler *spirv_compiler;
struct vkd3d_shader_parser *parser;
int ret;
vkd3d_shader_dump_shader(compile_info->source_type, parser->shader_version.type, &compile_info->source);
scan_info = *compile_info;
scan_descriptor_info.type = VKD3D_SHADER_STRUCTURE_TYPE_SCAN_DESCRIPTOR_INFO;
scan_descriptor_info.next = scan_info.next;
scan_info.next = &scan_descriptor_info;
if ((ret = scan_dxbc(&scan_info, message_context)) < 0)
if ((ret = scan_with_parser(&scan_info, message_context, parser)) < 0)
return ret;
if ((ret = vkd3d_shader_sm4_parser_create(compile_info, message_context, &parser)) < 0)
switch (compile_info->target_type)
{
WARN("Failed to initialise shader parser.\n");
vkd3d_shader_free_scan_descriptor_info(&scan_descriptor_info);
return ret;
}
case VKD3D_SHADER_TARGET_D3D_ASM:
ret = vkd3d_dxbc_binary_to_text(&parser->instructions, &parser->shader_version, compile_info, out);
break;
vkd3d_shader_dump_shader(compile_info->source_type, parser->shader_version.type, &compile_info->source);
case VKD3D_SHADER_TARGET_GLSL:
if (!(glsl_generator = vkd3d_glsl_generator_create(&parser->shader_version,
message_context, &parser->location)))
{
ERR("Failed to create GLSL generator.\n");
vkd3d_shader_free_scan_descriptor_info(&scan_descriptor_info);
return VKD3D_ERROR;
}
if (compile_info->target_type == VKD3D_SHADER_TARGET_D3D_ASM)
{
vkd3d_shader_free_scan_descriptor_info(&scan_descriptor_info);
ret = vkd3d_dxbc_binary_to_text(&parser->instructions, &parser->shader_version, compile_info, out);
vkd3d_shader_parser_destroy(parser);
return ret;
}
ret = vkd3d_glsl_generator_generate(glsl_generator, parser, out);
vkd3d_glsl_generator_destroy(glsl_generator);
break;
if (compile_info->target_type == VKD3D_SHADER_TARGET_GLSL)
{
struct vkd3d_glsl_generator *glsl_generator;
case VKD3D_SHADER_TARGET_SPIRV_BINARY:
case VKD3D_SHADER_TARGET_SPIRV_TEXT:
ret = spirv_compile(parser, &scan_descriptor_info, compile_info, out, message_context);
break;
if (!(glsl_generator = vkd3d_glsl_generator_create(&parser->shader_version,
message_context, &parser->location)))
{
ERR("Failed to create GLSL generator.\n");
vkd3d_shader_parser_destroy(parser);
vkd3d_shader_free_scan_descriptor_info(&scan_descriptor_info);
return VKD3D_ERROR;
}
default:
/* Validation should prevent us from reaching this. */
assert(0);
}
ret = vkd3d_glsl_generator_generate(glsl_generator, parser, out);
vkd3d_shader_free_scan_descriptor_info(&scan_descriptor_info);
return ret;
}
vkd3d_glsl_generator_destroy(glsl_generator);
vkd3d_shader_parser_destroy(parser);
vkd3d_shader_free_scan_descriptor_info(&scan_descriptor_info);
return ret;
}
static int compile_dxbc_tpf(const struct vkd3d_shader_compile_info *compile_info,
struct vkd3d_shader_code *out, struct vkd3d_shader_message_context *message_context)
{
struct vkd3d_shader_parser *parser;
int ret;
if (!(spirv_compiler = spirv_compiler_create(&parser->shader_version, &parser->shader_desc,
compile_info, &scan_descriptor_info, message_context, &parser->location)))
if ((ret = vkd3d_shader_sm4_parser_create(compile_info, message_context, &parser)) < 0)
{
ERR("Failed to create DXBC compiler.\n");
vkd3d_shader_parser_destroy(parser);
vkd3d_shader_free_scan_descriptor_info(&scan_descriptor_info);
return VKD3D_ERROR;
WARN("Failed to initialise shader parser.\n");
return ret;
}
ret = spirv_compiler_generate_spirv(spirv_compiler, compile_info, parser, out);
ret = vkd3d_shader_parser_compile(parser, compile_info, out, message_context);
spirv_compiler_destroy(spirv_compiler);
vkd3d_shader_parser_destroy(parser);
vkd3d_shader_free_scan_descriptor_info(&scan_descriptor_info);
return ret;
}
......
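Editor's note: a minimal sketch of the new buffer alignment behaviour, assuming the vkd3d_bytecode_buffer internals above (the helper function is illustrative, not part of the patch):

    static void bytecode_align_sketch(void)
    {
        struct vkd3d_bytecode_buffer buffer = {0};
        size_t aligned;

        /* Three bytes are written at offset 0; buffer.size becomes 3. */
        bytecode_put_bytes(&buffer, "abc", 3);

        /* Pads with 0xab up to the next 4-byte boundary and returns that
         * offset (4 here); the removed bytecode_get_next_offset() only
         * computed the aligned offset without reserving or padding. */
        aligned = bytecode_align(&buffer);

        /* bytecode_put_bytes() now aligns internally as well, so this u32
         * lands at offset 4 either way. */
        put_u32(&buffer, 0u);

        vkd3d_free(buffer.data);
        (void)aligned;
    }

Elsewhere in the series, offsets recorded right after u32 writes switch to bytecode_get_size(), while offsets that can follow unaligned string data switch to bytecode_align(), so the padding bytes are emitted before the offset is recorded.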
......@@ -993,10 +993,8 @@ struct vkd3d_shader_parser
struct vkd3d_shader_desc shader_desc;
struct vkd3d_shader_version shader_version;
const uint32_t *ptr;
const struct vkd3d_shader_parser_ops *ops;
struct vkd3d_shader_instruction_array instructions;
size_t instruction_idx;
};
struct vkd3d_shader_parser_ops
......@@ -1071,6 +1069,8 @@ struct vkd3d_bytecode_buffer
int status;
};
/* Align to the next 4-byte offset, and return that offset. */
size_t bytecode_align(struct vkd3d_bytecode_buffer *buffer);
size_t bytecode_put_bytes(struct vkd3d_bytecode_buffer *buffer, const void *bytes, size_t size);
void set_u32(struct vkd3d_bytecode_buffer *buffer, size_t offset, uint32_t value);
......@@ -1094,11 +1094,6 @@ static inline size_t bytecode_get_size(struct vkd3d_bytecode_buffer *buffer)
return buffer->size;
}
static inline size_t bytecode_get_next_offset(struct vkd3d_bytecode_buffer *buffer)
{
return align(buffer->size, 4);
}
uint32_t vkd3d_parse_integer(const char *s);
struct vkd3d_shader_message_context
......@@ -1150,16 +1145,10 @@ void vkd3d_glsl_generator_destroy(struct vkd3d_glsl_generator *generator);
#define SPIRV_MAX_SRC_COUNT 6
struct spirv_compiler;
struct spirv_compiler *spirv_compiler_create(const struct vkd3d_shader_version *shader_version,
const struct vkd3d_shader_desc *shader_desc, const struct vkd3d_shader_compile_info *compile_info,
int spirv_compile(struct vkd3d_shader_parser *parser,
const struct vkd3d_shader_scan_descriptor_info *scan_descriptor_info,
struct vkd3d_shader_message_context *message_context, const struct vkd3d_shader_location *location);
int spirv_compiler_generate_spirv(struct spirv_compiler *compiler,
const struct vkd3d_shader_compile_info *compile_info, struct vkd3d_shader_parser *parser,
struct vkd3d_shader_code *spirv);
void spirv_compiler_destroy(struct spirv_compiler *compiler);
const struct vkd3d_shader_compile_info *compile_info,
struct vkd3d_shader_code *out, struct vkd3d_shader_message_context *message_context);
void vkd3d_compute_dxbc_checksum(const void *dxbc, size_t size, uint32_t checksum[4]);
......@@ -1211,6 +1200,14 @@ static inline enum vkd3d_data_type vkd3d_data_type_from_component_type(
}
}
enum vkd3d_shader_input_sysval_semantic vkd3d_siv_from_sysval_indexed(enum vkd3d_shader_sysval_semantic sysval,
unsigned int index);
static inline enum vkd3d_shader_input_sysval_semantic vkd3d_siv_from_sysval(enum vkd3d_shader_sysval_semantic sysval)
{
return vkd3d_siv_from_sysval_indexed(sysval, 0);
}
static inline unsigned int vkd3d_write_mask_get_component_idx(DWORD write_mask)
{
unsigned int i;
......@@ -1342,11 +1339,15 @@ struct vkd3d_shader_normaliser
unsigned int instance_count;
unsigned int phase_body_idx;
enum vkd3d_shader_opcode phase;
struct vkd3d_shader_src_param *outpointid_param;
};
void shader_normaliser_init(struct vkd3d_shader_normaliser *normaliser,
struct vkd3d_shader_instruction_array *instructions);
enum vkd3d_result shader_normaliser_flatten_hull_shader_phases(struct vkd3d_shader_normaliser *normaliser);
enum vkd3d_result shader_normaliser_normalise_hull_shader_control_point_io(struct vkd3d_shader_normaliser *normaliser,
const struct vkd3d_shader_signature *input_signature);
void shader_normaliser_destroy(struct vkd3d_shader_normaliser *normaliser);
#endif /* __VKD3D_SHADER_PRIVATE_H */
......@@ -326,6 +326,9 @@ static void d3d12_heap_destroy(struct d3d12_heap *heap)
vkd3d_private_store_destroy(&heap->private_store);
if (heap->map_ptr)
VK_CALL(vkUnmapMemory(device->vk_device, heap->vk_memory));
VK_CALL(vkFreeMemory(device->vk_device, heap->vk_memory, NULL));
vkd3d_mutex_destroy(&heap->mutex);
......@@ -444,97 +447,6 @@ struct d3d12_heap *unsafe_impl_from_ID3D12Heap(ID3D12Heap *iface)
return impl_from_ID3D12Heap(iface);
}
static HRESULT d3d12_heap_map(struct d3d12_heap *heap, uint64_t offset,
struct d3d12_resource *resource, void **data)
{
struct d3d12_device *device = heap->device;
HRESULT hr = S_OK;
VkResult vr;
vkd3d_mutex_lock(&heap->mutex);
assert(!resource->map_count || heap->map_ptr);
if (!resource->map_count)
{
if (!heap->map_ptr)
{
const struct vkd3d_vk_device_procs *vk_procs = &device->vk_procs;
TRACE("Mapping heap %p.\n", heap);
assert(!heap->map_count);
if ((vr = VK_CALL(vkMapMemory(device->vk_device, heap->vk_memory,
0, VK_WHOLE_SIZE, 0, &heap->map_ptr))) < 0)
{
WARN("Failed to map device memory, vr %d.\n", vr);
heap->map_ptr = NULL;
}
hr = hresult_from_vk_result(vr);
}
if (heap->map_ptr)
++heap->map_count;
}
if (hr == S_OK)
{
assert(heap->map_ptr);
if (data)
*data = (BYTE *)heap->map_ptr + offset;
++resource->map_count;
}
else
{
assert(!heap->map_ptr);
if (data)
*data = NULL;
}
vkd3d_mutex_unlock(&heap->mutex);
return hr;
}
static void d3d12_heap_unmap(struct d3d12_heap *heap, struct d3d12_resource *resource)
{
struct d3d12_device *device = heap->device;
vkd3d_mutex_lock(&heap->mutex);
if (!resource->map_count)
{
WARN("Resource %p is not mapped.\n", resource);
goto done;
}
--resource->map_count;
if (resource->map_count)
goto done;
if (!heap->map_count)
{
ERR("Heap %p is not mapped.\n", heap);
goto done;
}
--heap->map_count;
if (!heap->map_count)
{
const struct vkd3d_vk_device_procs *vk_procs = &device->vk_procs;
TRACE("Unmapping heap %p, ptr %p.\n", heap, heap->map_ptr);
VK_CALL(vkUnmapMemory(device->vk_device, heap->vk_memory));
heap->map_ptr = NULL;
}
done:
vkd3d_mutex_unlock(&heap->mutex);
}
static HRESULT validate_heap_desc(const D3D12_HEAP_DESC *desc, const struct d3d12_resource *resource)
{
if (!resource && !desc->SizeInBytes)
......@@ -559,11 +471,18 @@ static HRESULT validate_heap_desc(const D3D12_HEAP_DESC *desc, const struct d3d1
return S_OK;
}
static VkMemoryPropertyFlags d3d12_heap_get_memory_property_flags(const struct d3d12_heap *heap)
{
return heap->device->memory_properties.memoryTypes[heap->vk_memory_type].propertyFlags;
}
static HRESULT d3d12_heap_init(struct d3d12_heap *heap,
struct d3d12_device *device, const D3D12_HEAP_DESC *desc, const struct d3d12_resource *resource)
{
const struct vkd3d_vk_device_procs *vk_procs = &device->vk_procs;
VkMemoryRequirements memory_requirements;
VkDeviceSize vk_memory_size;
VkResult vr;
HRESULT hr;
heap->ID3D12Heap_iface.lpVtbl = &d3d12_heap_vtbl;
......@@ -639,6 +558,18 @@ static HRESULT d3d12_heap_init(struct d3d12_heap *heap,
else
heap->resource_count = 1;
if (d3d12_heap_get_memory_property_flags(heap) & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT)
{
if ((vr = VK_CALL(vkMapMemory(device->vk_device,
heap->vk_memory, 0, VK_WHOLE_SIZE, 0, &heap->map_ptr))) < 0)
{
heap->map_ptr = NULL;
ERR("Failed to map memory, vr %d.\n", vr);
d3d12_heap_destroy(heap);
return hresult_from_vk_result(hr);
}
}
return S_OK;
}
......@@ -1233,12 +1164,55 @@ static HRESULT STDMETHODCALLTYPE d3d12_resource_GetDevice(ID3D12Resource *iface,
return d3d12_device_query_interface(resource->device, iid, device);
}
static void *d3d12_resource_get_map_ptr(struct d3d12_resource *resource)
{
assert(resource->heap->map_ptr);
return (uint8_t *)resource->heap->map_ptr + resource->heap_offset;
}
static void d3d12_resource_get_vk_range(struct d3d12_resource *resource,
uint64_t offset, uint64_t size, VkMappedMemoryRange *vk_range)
{
vk_range->sType = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE;
vk_range->pNext = NULL;
vk_range->memory = resource->heap->vk_memory;
vk_range->offset = resource->heap_offset + offset;
vk_range->size = size;
}
static void d3d12_resource_invalidate(struct d3d12_resource *resource, uint64_t offset, uint64_t size)
{
const struct vkd3d_vk_device_procs *vk_procs = &resource->device->vk_procs;
VkMappedMemoryRange vk_range;
VkResult vr;
if (d3d12_heap_get_memory_property_flags(resource->heap) & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT)
return;
d3d12_resource_get_vk_range(resource, offset, size, &vk_range);
if ((vr = VK_CALL(vkInvalidateMappedMemoryRanges(resource->device->vk_device, 1, &vk_range))) < 0)
ERR("Failed to invalidate memory, vr %d.\n", vr);
}
static void d3d12_resource_flush(struct d3d12_resource *resource, uint64_t offset, uint64_t size)
{
const struct vkd3d_vk_device_procs *vk_procs = &resource->device->vk_procs;
VkMappedMemoryRange vk_range;
VkResult vr;
if (d3d12_heap_get_memory_property_flags(resource->heap) & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT)
return;
d3d12_resource_get_vk_range(resource, offset, size, &vk_range);
if ((vr = VK_CALL(vkFlushMappedMemoryRanges(resource->device->vk_device, 1, &vk_range))) < 0)
ERR("Failed to flush memory, vr %d.\n", vr);
}
static HRESULT STDMETHODCALLTYPE d3d12_resource_Map(ID3D12Resource *iface, UINT sub_resource,
const D3D12_RANGE *read_range, void **data)
{
struct d3d12_resource *resource = impl_from_ID3D12Resource(iface);
unsigned int sub_resource_count;
HRESULT hr;
TRACE("iface %p, sub_resource %u, read_range %p, data %p.\n",
iface, sub_resource, read_range, data);
......@@ -1269,15 +1243,18 @@ static HRESULT STDMETHODCALLTYPE d3d12_resource_Map(ID3D12Resource *iface, UINT
return E_NOTIMPL;
}
WARN("Ignoring read range %p.\n", read_range);
if (FAILED(hr = d3d12_heap_map(resource->heap, resource->heap_offset, resource, data)))
WARN("Failed to map resource %p, hr %#x.\n", resource, hr);
if (data)
{
*data = d3d12_resource_get_map_ptr(resource);
TRACE("Returning pointer %p.\n", *data);
}
return hr;
if (!read_range)
d3d12_resource_invalidate(resource, 0, resource->desc.Width);
else if (read_range->End > read_range->Begin)
d3d12_resource_invalidate(resource, read_range->Begin, read_range->End - read_range->Begin);
return S_OK;
}
static void STDMETHODCALLTYPE d3d12_resource_Unmap(ID3D12Resource *iface, UINT sub_resource,
......@@ -1296,9 +1273,10 @@ static void STDMETHODCALLTYPE d3d12_resource_Unmap(ID3D12Resource *iface, UINT s
return;
}
WARN("Ignoring written range %p.\n", written_range);
d3d12_heap_unmap(resource->heap, resource);
if (!written_range)
d3d12_resource_flush(resource, 0, resource->desc.Width);
else if (written_range->End > written_range->Begin)
d3d12_resource_flush(resource, written_range->Begin, written_range->End - written_range->Begin);
}
static D3D12_RESOURCE_DESC * STDMETHODCALLTYPE d3d12_resource_GetDesc(ID3D12Resource *iface,
......@@ -1330,10 +1308,10 @@ static HRESULT STDMETHODCALLTYPE d3d12_resource_WriteToSubresource(ID3D12Resourc
VkImageSubresource vk_sub_resource;
const struct vkd3d_format *format;
VkSubresourceLayout vk_layout;
uint64_t dst_offset, dst_size;
struct d3d12_device *device;
uint8_t *dst_data;
D3D12_BOX box;
HRESULT hr;
TRACE("iface %p, src_data %p, src_row_pitch %u, src_slice_pitch %u, "
"dst_sub_resource %u, dst_box %s.\n",
......@@ -1391,20 +1369,17 @@ static HRESULT STDMETHODCALLTYPE d3d12_resource_WriteToSubresource(ID3D12Resourc
TRACE("Offset %#"PRIx64", size %#"PRIx64", row pitch %#"PRIx64", depth pitch %#"PRIx64".\n",
vk_layout.offset, vk_layout.size, vk_layout.rowPitch, vk_layout.depthPitch);
if (FAILED(hr = d3d12_heap_map(resource->heap, resource->heap_offset, resource, (void **)&dst_data)))
{
WARN("Failed to map resource %p, hr %#x.\n", resource, hr);
return hr;
}
dst_data += vk_layout.offset + vkd3d_format_get_data_offset(format, vk_layout.rowPitch,
dst_data = d3d12_resource_get_map_ptr(resource);
dst_offset = vk_layout.offset + vkd3d_format_get_data_offset(format, vk_layout.rowPitch,
vk_layout.depthPitch, dst_box->left, dst_box->top, dst_box->front);
dst_size = vk_layout.offset + vkd3d_format_get_data_offset(format, vk_layout.rowPitch,
vk_layout.depthPitch, dst_box->right, dst_box->bottom - 1, dst_box->back - 1) - dst_offset;
vkd3d_format_copy_data(format, src_data, src_row_pitch, src_slice_pitch,
dst_data, vk_layout.rowPitch, vk_layout.depthPitch, dst_box->right - dst_box->left,
dst_data + dst_offset, vk_layout.rowPitch, vk_layout.depthPitch, dst_box->right - dst_box->left,
dst_box->bottom - dst_box->top, dst_box->back - dst_box->front);
d3d12_heap_unmap(resource->heap, resource);
d3d12_resource_flush(resource, dst_offset, dst_size);
return S_OK;
}
......@@ -1418,10 +1393,10 @@ static HRESULT STDMETHODCALLTYPE d3d12_resource_ReadFromSubresource(ID3D12Resour
VkImageSubresource vk_sub_resource;
const struct vkd3d_format *format;
VkSubresourceLayout vk_layout;
uint64_t src_offset, src_size;
struct d3d12_device *device;
uint8_t *src_data;
D3D12_BOX box;
HRESULT hr;
TRACE("iface %p, dst_data %p, dst_row_pitch %u, dst_slice_pitch %u, "
"src_sub_resource %u, src_box %s.\n",
......@@ -1479,21 +1454,18 @@ static HRESULT STDMETHODCALLTYPE d3d12_resource_ReadFromSubresource(ID3D12Resour
TRACE("Offset %#"PRIx64", size %#"PRIx64", row pitch %#"PRIx64", depth pitch %#"PRIx64".\n",
vk_layout.offset, vk_layout.size, vk_layout.rowPitch, vk_layout.depthPitch);
if (FAILED(hr = d3d12_heap_map(resource->heap, resource->heap_offset, resource, (void **)&src_data)))
{
WARN("Failed to map resource %p, hr %#x.\n", resource, hr);
return hr;
}
src_data += vk_layout.offset + vkd3d_format_get_data_offset(format, vk_layout.rowPitch,
src_data = d3d12_resource_get_map_ptr(resource);
src_offset = vk_layout.offset + vkd3d_format_get_data_offset(format, vk_layout.rowPitch,
vk_layout.depthPitch, src_box->left, src_box->top, src_box->front);
src_size = vk_layout.offset + vkd3d_format_get_data_offset(format, vk_layout.rowPitch,
vk_layout.depthPitch, src_box->right, src_box->bottom - 1, src_box->back - 1) - src_offset;
d3d12_resource_invalidate(resource, src_offset, src_size);
vkd3d_format_copy_data(format, src_data, vk_layout.rowPitch, vk_layout.depthPitch,
vkd3d_format_copy_data(format, src_data + src_offset, vk_layout.rowPitch, vk_layout.depthPitch,
dst_data, dst_row_pitch, dst_slice_pitch, src_box->right - src_box->left,
src_box->bottom - src_box->top, src_box->back - src_box->front);
d3d12_heap_unmap(resource->heap, resource);
return S_OK;
}
......
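Editor's note: the heap is now mapped once at creation when its memory type is host-visible, and Map()/Unmap() only invalidate or flush the requested range on non-coherent memory instead of counting maps and calling vkMapMemory()/vkUnmapMemory(). A minimal sketch of the application-visible consequence, which the new test_readback_map_stability() below exercises (buffer creation, GPU work and synchronisation omitted; illustrative only):

    static void readback_map_sketch(ID3D12Resource *buffer, SIZE_T size)
    {
        D3D12_RANGE read_range = {0, size};
        void *data, *data2;

        /* Returns heap->map_ptr + heap_offset and invalidates the read
         * range on non-coherent memory. */
        ID3D12Resource_Map(buffer, 0, &read_range, &data);

        /* No longer unmaps the Vulkan memory; it only flushes the written
         * range (the whole resource when the range is NULL), so "data"
         * stays valid afterwards. */
        ID3D12Resource_Unmap(buffer, 0, NULL);

        /* Mapping again yields the same, stable pointer: data2 == data. */
        ID3D12Resource_Map(buffer, 0, &read_range, &data2);
        ID3D12Resource_Unmap(buffer, 0, NULL);
    }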
[require]
shader model >= 4.0
[pixel shader todo]
[pixel shader]
float4 main(uniform int i, uniform uint u, uniform bool b, uniform float f) : sv_target
{
......@@ -13,7 +13,7 @@ uniform 0 int -1
uniform 1 uint 3
uniform 2 int -2
uniform 3 float 0.5
todo draw quad
draw quad
probe all rgba (0.5, 0.5, 0.5, 0.5)
[pixel shader]
......
......@@ -36196,6 +36196,105 @@ static void test_clock_calibration(void)
destroy_test_context(&context);
}
 
static void test_readback_map_stability(void)
{
D3D12_TEXTURE_COPY_LOCATION dst_location, src_location;
ID3D12GraphicsCommandList *command_list;
unsigned int width, height, row_pitch;
D3D12_RESOURCE_DESC resource_desc;
struct test_context context;
ID3D12CommandQueue *queue;
ID3D12Resource *buffer;
D3D12_RANGE read_range;
ID3D12Device *device;
void *data, *data2;
uint32_t colour;
HRESULT hr;
static const float green[] = {0.0f, 1.0f, 0.0f, 1.0f};
static const float blue[] = {0.0f, 0.0f, 1.0f, 1.0f};
if (!init_test_context(&context, NULL))
return;
device = context.device;
queue = context.queue;
command_list = context.list;
resource_desc = ID3D12Resource_GetDesc(context.render_target);
width = align(resource_desc.Width, format_block_width(resource_desc.Format));
height = align(resource_desc.Height, format_block_height(resource_desc.Format));
row_pitch = align(width * format_size(resource_desc.Format), D3D12_TEXTURE_DATA_PITCH_ALIGNMENT);
buffer = create_readback_buffer(device, row_pitch * height);
dst_location.pResource = buffer;
dst_location.Type = D3D12_TEXTURE_COPY_TYPE_PLACED_FOOTPRINT;
dst_location.PlacedFootprint.Offset = 0;
dst_location.PlacedFootprint.Footprint.Format = resource_desc.Format;
dst_location.PlacedFootprint.Footprint.Width = width;
dst_location.PlacedFootprint.Footprint.Height = height;
dst_location.PlacedFootprint.Footprint.Depth = 1;
dst_location.PlacedFootprint.Footprint.RowPitch = row_pitch;
src_location.pResource = context.render_target;
src_location.Type = D3D12_TEXTURE_COPY_TYPE_SUBRESOURCE_INDEX;
src_location.SubresourceIndex = 0;
read_range.Begin = 0;
read_range.End = row_pitch * height;
ID3D12GraphicsCommandList_ClearRenderTargetView(context.list, context.rtv, green, 0, NULL);
transition_resource_state(command_list, context.render_target,
D3D12_RESOURCE_STATE_RENDER_TARGET, D3D12_RESOURCE_STATE_COPY_SOURCE);
ID3D12GraphicsCommandList_CopyTextureRegion(command_list, &dst_location, 0, 0, 0, &src_location, NULL);
hr = ID3D12GraphicsCommandList_Close(command_list);
assert_that(hr == S_OK, "Failed to close command list, hr %#x.\n", hr);
exec_command_list(queue, command_list);
wait_queue_idle(device, queue);
hr = ID3D12Resource_Map(buffer, 0, &read_range, &data);
assert_that(hr == S_OK, "Failed to map readback buffer, hr %#x.\n", hr);
colour = *(uint32_t *)data;
ok(colour == 0xff00ff00, "Got colour %08x.\n", colour);
ID3D12Resource_Unmap(buffer, 0, NULL);
colour = *(uint32_t *)data;
ok(colour == 0xff00ff00, "Got colour %08x.\n", colour);
reset_command_list(command_list, context.allocator);
transition_resource_state(command_list, context.render_target,
D3D12_RESOURCE_STATE_COPY_SOURCE, D3D12_RESOURCE_STATE_RENDER_TARGET);
ID3D12GraphicsCommandList_ClearRenderTargetView(context.list, context.rtv, blue, 0, NULL);
transition_resource_state(command_list, context.render_target,
D3D12_RESOURCE_STATE_RENDER_TARGET, D3D12_RESOURCE_STATE_COPY_SOURCE);
ID3D12GraphicsCommandList_CopyTextureRegion(command_list, &dst_location, 0, 0, 0, &src_location, NULL);
hr = ID3D12GraphicsCommandList_Close(command_list);
assert_that(hr == S_OK, "Failed to close command list, hr %#x.\n", hr);
exec_command_list(queue, command_list);
wait_queue_idle(device, queue);
colour = *(uint32_t *)data;
ok(colour == 0xffff0000, "Got colour %08x.\n", colour);
hr = ID3D12Resource_Map(buffer, 0, &read_range, &data2);
assert_that(hr == S_OK, "Failed to map readback buffer, hr %#x.\n", hr);
ok(data2 == data, "Expected map pointer to be stable.\n");
colour = *(uint32_t *)data2;
ok(colour == 0xffff0000, "Got colour %08x.\n", colour);
ID3D12Resource_Unmap(buffer, 0, NULL);
ID3D12Resource_Release(buffer);
destroy_test_context(&context);
}
START_TEST(d3d12)
{
parse_args(argc, argv);
......@@ -36373,4 +36472,5 @@ START_TEST(d3d12)
run_test(test_unbounded_resource_arrays);
run_test(test_unbounded_samplers);
run_test(test_clock_calibration);
run_test(test_readback_map_stability);
}
[pixel shader]
uniform float4 x;
float4 main() : SV_TARGET
{
return x.x ? x : x - 1;
}
[test]
uniform 0 float4 2.0 3.0 4.0 5.0
draw quad
probe all rgba (2.0, 3.0, 4.0, 5.0)
uniform 0 float4 0.0 10.0 11.0 12.0
draw quad
probe all rgba (-1.0, 9.0, 10.0, 11.0)