Compare revisions

Changes are shown as if the source revision was being merged into the target revision.

Commits on Source (8)
...@@ -6833,7 +6833,7 @@ static void vsir_src_from_hlsl_node(struct vkd3d_shader_src_param *src,
}
static void vsir_dst_from_hlsl_node(struct vkd3d_shader_dst_param *dst,
struct hlsl_ctx *ctx, const struct hlsl_ir_node *instr)
{
VKD3D_ASSERT(instr->reg.allocated);
vsir_dst_param_init(dst, VKD3DSPR_TEMP, vsir_data_type_from_hlsl_instruction(ctx, instr), 1);
...@@ -7854,6 +7854,197 @@ static bool type_is_integer(const struct hlsl_type *type)
|| type->e.numeric.type == HLSL_TYPE_UINT;
}
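/* A true bool is assumed to be stored as ~0u here, so casting from bool is
 * emitted as an AND with the bit pattern of the destination type's "one"
 * ("bits" is one.u for float targets and 1 for int/uint targets). */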
static void sm4_generate_vsir_cast_from_bool(struct hlsl_ctx *ctx, struct vsir_program *program,
const struct hlsl_ir_expr *expr, uint32_t bits)
{
struct hlsl_ir_node *operand = expr->operands[0].node;
const struct hlsl_ir_node *instr = &expr->node;
struct vkd3d_shader_dst_param *dst_param;
struct hlsl_constant_value value = {0};
struct vkd3d_shader_instruction *ins;
VKD3D_ASSERT(instr->reg.allocated);
if (!(ins = generate_vsir_add_program_instruction(ctx, program, &instr->loc, VKD3DSIH_AND, 1, 2)))
return;
dst_param = &ins->dst[0];
vsir_dst_from_hlsl_node(dst_param, ctx, instr);
vsir_src_from_hlsl_node(&ins->src[0], ctx, operand, dst_param->write_mask);
value.u[0].u = bits;
vsir_src_from_hlsl_constant_value(&ins->src[1], ctx, &value, VKD3D_DATA_UINT, 1, 0);
}
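/* Numeric casts only: narrowing casts and casts to bool have already been
 * lowered, so source and destination have the same component count here. */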
static bool sm4_generate_vsir_instr_expr_cast(struct hlsl_ctx *ctx,
struct vsir_program *program, struct hlsl_ir_expr *expr)
{
const struct hlsl_ir_node *arg1 = expr->operands[0].node;
const struct hlsl_type *dst_type = expr->node.data_type;
const struct hlsl_type *src_type = arg1->data_type;
static const union
{
uint32_t u;
float f;
} one = { .f = 1.0 };
/* Narrowing casts were already lowered. */
VKD3D_ASSERT(src_type->dimx == dst_type->dimx);
switch (dst_type->e.numeric.type)
{
case HLSL_TYPE_HALF:
case HLSL_TYPE_FLOAT:
switch (src_type->e.numeric.type)
{
case HLSL_TYPE_HALF:
case HLSL_TYPE_FLOAT:
generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VKD3DSIH_MOV, 0, 0, true);
return true;
case HLSL_TYPE_INT:
generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VKD3DSIH_ITOF, 0, 0, true);
return true;
case HLSL_TYPE_UINT:
generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VKD3DSIH_UTOF, 0, 0, true);
return true;
case HLSL_TYPE_BOOL:
sm4_generate_vsir_cast_from_bool(ctx, program, expr, one.u);
return true;
case HLSL_TYPE_DOUBLE:
hlsl_fixme(ctx, &expr->node.loc, "SM4 cast from double to float.");
return false;
default:
vkd3d_unreachable();
}
break;
case HLSL_TYPE_INT:
switch (src_type->e.numeric.type)
{
case HLSL_TYPE_HALF:
case HLSL_TYPE_FLOAT:
generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VKD3DSIH_FTOI, 0, 0, true);
return true;
case HLSL_TYPE_INT:
case HLSL_TYPE_UINT:
generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VKD3DSIH_MOV, 0, 0, true);
return true;
case HLSL_TYPE_BOOL:
sm4_generate_vsir_cast_from_bool(ctx, program, expr, 1u);
return true;
case HLSL_TYPE_DOUBLE:
hlsl_fixme(ctx, &expr->node.loc, "SM4 cast from double to int.");
return false;
default:
vkd3d_unreachable();
}
break;
case HLSL_TYPE_UINT:
switch (src_type->e.numeric.type)
{
case HLSL_TYPE_HALF:
case HLSL_TYPE_FLOAT:
generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VKD3DSIH_FTOU, 0, 0, true);
return true;
case HLSL_TYPE_INT:
case HLSL_TYPE_UINT:
generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VKD3DSIH_MOV, 0, 0, true);
return true;
case HLSL_TYPE_BOOL:
sm4_generate_vsir_cast_from_bool(ctx, program, expr, 1u);
return true;
case HLSL_TYPE_DOUBLE:
hlsl_fixme(ctx, &expr->node.loc, "SM4 cast from double to uint.");
return false;
default:
vkd3d_unreachable();
}
break;
case HLSL_TYPE_DOUBLE:
hlsl_fixme(ctx, &expr->node.loc, "SM4 cast to double.");
return false;
case HLSL_TYPE_BOOL:
/* Casts to bool should have already been lowered. */
default:
vkd3d_unreachable();
}
}
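/* Emit an instruction that writes two destinations (e.g. sincos, udiv, imul):
 * dst_idx selects the destination that receives the expression result, while
 * the other destination is set to the null register. */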
static void sm4_generate_vsir_expr_with_two_destinations(struct hlsl_ctx *ctx, struct vsir_program *program,
enum vkd3d_shader_opcode opcode, const struct hlsl_ir_expr *expr, unsigned int dst_idx)
{
struct vkd3d_shader_dst_param *dst_param, *null_param;
const struct hlsl_ir_node *instr = &expr->node;
struct vkd3d_shader_instruction *ins;
unsigned int i, src_count;
VKD3D_ASSERT(instr->reg.allocated);
for (i = 0; i < HLSL_MAX_OPERANDS; ++i)
{
if (expr->operands[i].node)
src_count = i + 1;
}
if (!(ins = generate_vsir_add_program_instruction(ctx, program, &instr->loc, opcode, 2, src_count)))
return;
dst_param = &ins->dst[dst_idx];
vsir_dst_from_hlsl_node(dst_param, ctx, instr);
null_param = &ins->dst[1 - dst_idx];
vsir_dst_param_init(null_param, VKD3DSPR_NULL, VKD3D_DATA_FLOAT, 0);
null_param->reg.dimension = VSIR_DIMENSION_NONE;
for (i = 0; i < src_count; ++i)
vsir_src_from_hlsl_node(&ins->src[i], ctx, expr->operands[i].node, dst_param->write_mask);
}
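/* SM4 has no rcp instruction, so for shader model < 5.0 rcp is implemented
 * as "div dst, 1.0, src". */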
static void sm4_generate_vsir_rcp_using_div(struct hlsl_ctx *ctx,
struct vsir_program *program, const struct hlsl_ir_expr *expr)
{
struct hlsl_ir_node *operand = expr->operands[0].node;
const struct hlsl_ir_node *instr = &expr->node;
struct vkd3d_shader_dst_param *dst_param;
struct hlsl_constant_value value = {0};
struct vkd3d_shader_instruction *ins;
VKD3D_ASSERT(type_is_float(expr->node.data_type));
if (!(ins = generate_vsir_add_program_instruction(ctx, program, &instr->loc, VKD3DSIH_DIV, 1, 2)))
return;
dst_param = &ins->dst[0];
vsir_dst_from_hlsl_node(dst_param, ctx, instr);
value.u[0].f = 1.0f;
value.u[1].f = 1.0f;
value.u[2].f = 1.0f;
value.u[3].f = 1.0f;
vsir_src_from_hlsl_constant_value(&ins->src[0], ctx, &value,
VKD3D_DATA_FLOAT, instr->data_type->dimx, dst_param->write_mask);
vsir_src_from_hlsl_node(&ins->src[1], ctx, operand, dst_param->write_mask);
}
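/* Translate a single HLSL expression into vsir instructions. Operations
 * without a direct one-to-one opcode mapping (rcp on SM4, sin/cos, integer
 * div/mod/mul) go through the helpers above. */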
static bool sm4_generate_vsir_instr_expr(struct hlsl_ctx *ctx,
struct vsir_program *program, struct hlsl_ir_expr *expr, const char *dst_type_name)
{
...@@ -7880,11 +8071,19 @@ static bool sm4_generate_vsir_instr_expr(struct hlsl_ctx *ctx,
generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VKD3DSIH_NOT, 0, 0, true);
return true;
case HLSL_OP1_CAST:
return sm4_generate_vsir_instr_expr_cast(ctx, program, expr);
case HLSL_OP1_CEIL:
VKD3D_ASSERT(type_is_float(dst_type));
generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VKD3DSIH_ROUND_PI, 0, 0, true);
return true;
case HLSL_OP1_COS:
VKD3D_ASSERT(type_is_float(dst_type));
sm4_generate_vsir_expr_with_two_destinations(ctx, program, VKD3DSIH_SINCOS, expr, 1);
return true;
case HLSL_OP1_DSX:
VKD3D_ASSERT(type_is_float(dst_type));
generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VKD3DSIH_DSX, 0, 0, true);
...@@ -7969,6 +8168,22 @@ static bool sm4_generate_vsir_instr_expr(struct hlsl_ctx *ctx,
return false;
}
case HLSL_OP1_RCP:
switch (dst_type->e.numeric.type)
{
case HLSL_TYPE_FLOAT:
/* SM5 comes with a RCP opcode */
if (hlsl_version_ge(ctx, 5, 0))
generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VKD3DSIH_RCP, 0, 0, true);
else
sm4_generate_vsir_rcp_using_div(ctx, program, expr);
return true;
default:
hlsl_fixme(ctx, &expr->node.loc, "SM4 %s rcp expression.", dst_type_name);
return false;
}
case HLSL_OP1_REINTERPRET:
generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VKD3DSIH_MOV, 0, 0, true);
return true;
...@@ -7983,6 +8198,16 @@ static bool sm4_generate_vsir_instr_expr(struct hlsl_ctx *ctx,
generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VKD3DSIH_RSQ, 0, 0, true);
return true;
case HLSL_OP1_SAT:
VKD3D_ASSERT(type_is_float(dst_type));
generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VKD3DSIH_MOV, 0, VKD3DSPDM_SATURATE, true);
return true;
case HLSL_OP1_SIN:
VKD3D_ASSERT(type_is_float(dst_type));
sm4_generate_vsir_expr_with_two_destinations(ctx, program, VKD3DSIH_SINCOS, expr, 0);
return true;
case HLSL_OP1_SQRT:
VKD3D_ASSERT(type_is_float(dst_type));
generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VKD3DSIH_SQRT, 0, 0, true);
...@@ -8025,6 +8250,50 @@ static bool sm4_generate_vsir_instr_expr(struct hlsl_ctx *ctx,
generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VKD3DSIH_XOR, 0, 0, true);
return true;
case HLSL_OP2_DIV:
switch (dst_type->e.numeric.type)
{
case HLSL_TYPE_FLOAT:
generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VKD3DSIH_DIV, 0, 0, true);
return true;
case HLSL_TYPE_UINT:
sm4_generate_vsir_expr_with_two_destinations(ctx, program, VKD3DSIH_UDIV, expr, 0);
return true;
default:
hlsl_fixme(ctx, &expr->node.loc, "SM4 %s division expression.", dst_type_name);
return false;
}
case HLSL_OP2_DOT:
switch (dst_type->e.numeric.type)
{
case HLSL_TYPE_FLOAT:
switch (expr->operands[0].node->data_type->dimx)
{
case 4:
generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VKD3DSIH_DP4, 0, 0, false);
return true;
case 3:
generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VKD3DSIH_DP3, 0, 0, false);
return true;
case 2:
generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VKD3DSIH_DP2, 0, 0, false);
return true;
case 1:
default:
vkd3d_unreachable();
}
default:
hlsl_fixme(ctx, &expr->node.loc, "SM4 %s dot expression.", dst_type_name);
return false;
}
case HLSL_OP2_EQUAL:
VKD3D_ASSERT(dst_type->e.numeric.type == HLSL_TYPE_BOOL);
...@@ -8167,6 +8436,37 @@ static bool sm4_generate_vsir_instr_expr(struct hlsl_ctx *ctx,
return false;
}
case HLSL_OP2_MOD:
switch (dst_type->e.numeric.type)
{
case HLSL_TYPE_UINT:
sm4_generate_vsir_expr_with_two_destinations(ctx, program, VKD3DSIH_UDIV, expr, 1);
return true;
default:
hlsl_fixme(ctx, &expr->node.loc, "SM4 %s modulus expression.", dst_type_name);
return false;
}
case HLSL_OP2_MUL:
switch (dst_type->e.numeric.type)
{
case HLSL_TYPE_FLOAT:
generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VKD3DSIH_MUL, 0, 0, true);
return true;
case HLSL_TYPE_INT:
case HLSL_TYPE_UINT:
/* Using IMUL instead of UMUL because we're taking the low
* bits, and the native compiler generates IMUL. */
sm4_generate_vsir_expr_with_two_destinations(ctx, program, VKD3DSIH_IMUL, expr, 1);
return true;
default:
hlsl_fixme(ctx, &expr->node.loc, "SM4 %s multiplication expression.", dst_type_name);
return false;
}
case HLSL_OP2_NEQUAL:
VKD3D_ASSERT(dst_type->e.numeric.type == HLSL_TYPE_BOOL);
...@@ -8200,6 +8500,7 @@ static bool sm4_generate_vsir_instr_expr(struct hlsl_ctx *ctx,
return true;
default:
hlsl_fixme(ctx, &expr->node.loc, "SM4 %s expression.", debug_hlsl_expr_op(expr->op));
return false;
}
}
...
...@@ -5032,105 +5032,6 @@ static void write_sm4_ret(const struct tpf_compiler *tpf)
write_sm4_instruction(tpf, &instr);
}
static void write_sm4_unary_op(const struct tpf_compiler *tpf, enum vkd3d_sm4_opcode opcode,
const struct hlsl_ir_node *dst, const struct hlsl_ir_node *src, enum vkd3d_shader_src_modifier src_mod)
{
struct sm4_instruction instr;
memset(&instr, 0, sizeof(instr));
instr.opcode = opcode;
sm4_dst_from_node(&instr.dsts[0], dst);
instr.dst_count = 1;
sm4_src_from_node(tpf, &instr.srcs[0], src, instr.dsts[0].write_mask);
instr.srcs[0].modifiers = src_mod;
instr.src_count = 1;
write_sm4_instruction(tpf, &instr);
}
static void write_sm4_unary_op_with_two_destinations(const struct tpf_compiler *tpf, enum vkd3d_sm4_opcode opcode,
const struct hlsl_ir_node *dst, unsigned int dst_idx, const struct hlsl_ir_node *src)
{
struct sm4_instruction instr;
memset(&instr, 0, sizeof(instr));
instr.opcode = opcode;
VKD3D_ASSERT(dst_idx < ARRAY_SIZE(instr.dsts));
sm4_dst_from_node(&instr.dsts[dst_idx], dst);
instr.dsts[1 - dst_idx].reg.type = VKD3DSPR_NULL;
instr.dsts[1 - dst_idx].reg.dimension = VSIR_DIMENSION_NONE;
instr.dsts[1 - dst_idx].reg.idx_count = 0;
instr.dst_count = 2;
sm4_src_from_node(tpf, &instr.srcs[0], src, instr.dsts[dst_idx].write_mask);
instr.src_count = 1;
write_sm4_instruction(tpf, &instr);
}
static void write_sm4_binary_op(const struct tpf_compiler *tpf, enum vkd3d_sm4_opcode opcode,
const struct hlsl_ir_node *dst, const struct hlsl_ir_node *src1, const struct hlsl_ir_node *src2)
{
struct sm4_instruction instr;
memset(&instr, 0, sizeof(instr));
instr.opcode = opcode;
sm4_dst_from_node(&instr.dsts[0], dst);
instr.dst_count = 1;
sm4_src_from_node(tpf, &instr.srcs[0], src1, instr.dsts[0].write_mask);
sm4_src_from_node(tpf, &instr.srcs[1], src2, instr.dsts[0].write_mask);
instr.src_count = 2;
write_sm4_instruction(tpf, &instr);
}
/* dp# instructions don't map the swizzle. */
static void write_sm4_binary_op_dot(const struct tpf_compiler *tpf, enum vkd3d_sm4_opcode opcode,
const struct hlsl_ir_node *dst, const struct hlsl_ir_node *src1, const struct hlsl_ir_node *src2)
{
struct sm4_instruction instr;
memset(&instr, 0, sizeof(instr));
instr.opcode = opcode;
sm4_dst_from_node(&instr.dsts[0], dst);
instr.dst_count = 1;
sm4_src_from_node(tpf, &instr.srcs[0], src1, VKD3DSP_WRITEMASK_ALL);
sm4_src_from_node(tpf, &instr.srcs[1], src2, VKD3DSP_WRITEMASK_ALL);
instr.src_count = 2;
write_sm4_instruction(tpf, &instr);
}
static void write_sm4_binary_op_with_two_destinations(const struct tpf_compiler *tpf,
enum vkd3d_sm4_opcode opcode, const struct hlsl_ir_node *dst, unsigned int dst_idx,
const struct hlsl_ir_node *src1, const struct hlsl_ir_node *src2)
{
struct sm4_instruction instr;
memset(&instr, 0, sizeof(instr));
instr.opcode = opcode;
VKD3D_ASSERT(dst_idx < ARRAY_SIZE(instr.dsts));
sm4_dst_from_node(&instr.dsts[dst_idx], dst);
instr.dsts[1 - dst_idx].reg.type = VKD3DSPR_NULL;
instr.dsts[1 - dst_idx].reg.dimension = VSIR_DIMENSION_NONE;
instr.dsts[1 - dst_idx].reg.idx_count = 0;
instr.dst_count = 2;
sm4_src_from_node(tpf, &instr.srcs[0], src1, instr.dsts[dst_idx].write_mask);
sm4_src_from_node(tpf, &instr.srcs[1], src2, instr.dsts[dst_idx].write_mask);
instr.src_count = 2;
write_sm4_instruction(tpf, &instr);
}
static void write_sm4_ld(const struct tpf_compiler *tpf, const struct hlsl_ir_node *dst,
const struct hlsl_deref *resource, const struct hlsl_ir_node *coords,
const struct hlsl_ir_node *sample_index, const struct hlsl_ir_node *texel_offset,
...@@ -5345,297 +5246,6 @@ static void write_sm4_resinfo(const struct tpf_compiler *tpf, const struct hlsl_
write_sm4_instruction(tpf, &instr);
}
static bool type_is_float(const struct hlsl_type *type)
{
return type->e.numeric.type == HLSL_TYPE_FLOAT || type->e.numeric.type == HLSL_TYPE_HALF;
}
static void write_sm4_cast_from_bool(const struct tpf_compiler *tpf, const struct hlsl_ir_expr *expr,
const struct hlsl_ir_node *arg, uint32_t mask)
{
struct sm4_instruction instr;
memset(&instr, 0, sizeof(instr));
instr.opcode = VKD3D_SM4_OP_AND;
sm4_dst_from_node(&instr.dsts[0], &expr->node);
instr.dst_count = 1;
sm4_src_from_node(tpf, &instr.srcs[0], arg, instr.dsts[0].write_mask);
instr.srcs[1].reg.type = VKD3DSPR_IMMCONST;
instr.srcs[1].reg.dimension = VSIR_DIMENSION_SCALAR;
instr.srcs[1].reg.u.immconst_u32[0] = mask;
instr.src_count = 2;
write_sm4_instruction(tpf, &instr);
}
static void write_sm4_cast(const struct tpf_compiler *tpf, const struct hlsl_ir_expr *expr)
{
static const union
{
uint32_t u;
float f;
} one = { .f = 1.0 };
const struct hlsl_ir_node *arg1 = expr->operands[0].node;
const struct hlsl_type *dst_type = expr->node.data_type;
const struct hlsl_type *src_type = arg1->data_type;
/* Narrowing casts were already lowered. */
VKD3D_ASSERT(src_type->dimx == dst_type->dimx);
switch (dst_type->e.numeric.type)
{
case HLSL_TYPE_HALF:
case HLSL_TYPE_FLOAT:
switch (src_type->e.numeric.type)
{
case HLSL_TYPE_HALF:
case HLSL_TYPE_FLOAT:
write_sm4_unary_op(tpf, VKD3D_SM4_OP_MOV, &expr->node, arg1, 0);
break;
case HLSL_TYPE_INT:
write_sm4_unary_op(tpf, VKD3D_SM4_OP_ITOF, &expr->node, arg1, 0);
break;
case HLSL_TYPE_UINT:
write_sm4_unary_op(tpf, VKD3D_SM4_OP_UTOF, &expr->node, arg1, 0);
break;
case HLSL_TYPE_BOOL:
write_sm4_cast_from_bool(tpf, expr, arg1, one.u);
break;
case HLSL_TYPE_DOUBLE:
hlsl_fixme(tpf->ctx, &expr->node.loc, "SM4 cast from double to float.");
break;
default:
vkd3d_unreachable();
}
break;
case HLSL_TYPE_INT:
switch (src_type->e.numeric.type)
{
case HLSL_TYPE_HALF:
case HLSL_TYPE_FLOAT:
write_sm4_unary_op(tpf, VKD3D_SM4_OP_FTOI, &expr->node, arg1, 0);
break;
case HLSL_TYPE_INT:
case HLSL_TYPE_UINT:
write_sm4_unary_op(tpf, VKD3D_SM4_OP_MOV, &expr->node, arg1, 0);
break;
case HLSL_TYPE_BOOL:
write_sm4_cast_from_bool(tpf, expr, arg1, 1);
break;
case HLSL_TYPE_DOUBLE:
hlsl_fixme(tpf->ctx, &expr->node.loc, "SM4 cast from double to int.");
break;
default:
vkd3d_unreachable();
}
break;
case HLSL_TYPE_UINT:
switch (src_type->e.numeric.type)
{
case HLSL_TYPE_HALF:
case HLSL_TYPE_FLOAT:
write_sm4_unary_op(tpf, VKD3D_SM4_OP_FTOU, &expr->node, arg1, 0);
break;
case HLSL_TYPE_INT:
case HLSL_TYPE_UINT:
write_sm4_unary_op(tpf, VKD3D_SM4_OP_MOV, &expr->node, arg1, 0);
break;
case HLSL_TYPE_BOOL:
write_sm4_cast_from_bool(tpf, expr, arg1, 1);
break;
case HLSL_TYPE_DOUBLE:
hlsl_fixme(tpf->ctx, &expr->node.loc, "SM4 cast from double to uint.");
break;
default:
vkd3d_unreachable();
}
break;
case HLSL_TYPE_DOUBLE:
hlsl_fixme(tpf->ctx, &expr->node.loc, "SM4 cast to double.");
break;
case HLSL_TYPE_BOOL:
/* Casts to bool should have already been lowered. */
default:
vkd3d_unreachable();
}
}
static void write_sm4_expr(const struct tpf_compiler *tpf, const struct hlsl_ir_expr *expr)
{
const struct vkd3d_shader_version *version = &tpf->program->shader_version;
const struct hlsl_ir_node *arg1 = expr->operands[0].node;
const struct hlsl_ir_node *arg2 = expr->operands[1].node;
const struct hlsl_type *dst_type = expr->node.data_type;
struct vkd3d_string_buffer *dst_type_string;
VKD3D_ASSERT(expr->node.reg.allocated);
if (!(dst_type_string = hlsl_type_to_string(tpf->ctx, dst_type)))
return;
switch (expr->op)
{
case HLSL_OP1_CAST:
write_sm4_cast(tpf, expr);
break;
case HLSL_OP1_COS:
VKD3D_ASSERT(type_is_float(dst_type));
write_sm4_unary_op_with_two_destinations(tpf, VKD3D_SM4_OP_SINCOS, &expr->node, 1, arg1);
break;
case HLSL_OP1_RCP:
switch (dst_type->e.numeric.type)
{
case HLSL_TYPE_FLOAT:
/* SM5 comes with a RCP opcode */
if (vkd3d_shader_ver_ge(version, 5, 0))
{
write_sm4_unary_op(tpf, VKD3D_SM5_OP_RCP, &expr->node, arg1, 0);
}
else
{
/* For SM4, implement as DIV dst, 1.0, src */
struct sm4_instruction instr;
struct hlsl_constant_value one;
VKD3D_ASSERT(type_is_float(dst_type));
memset(&instr, 0, sizeof(instr));
instr.opcode = VKD3D_SM4_OP_DIV;
sm4_dst_from_node(&instr.dsts[0], &expr->node);
instr.dst_count = 1;
for (unsigned int i = 0; i < 4; i++)
one.u[i].f = 1.0f;
sm4_src_from_constant_value(&instr.srcs[0], &one, dst_type->dimx, instr.dsts[0].write_mask);
sm4_src_from_node(tpf, &instr.srcs[1], arg1, instr.dsts[0].write_mask);
instr.src_count = 2;
write_sm4_instruction(tpf, &instr);
}
break;
default:
hlsl_fixme(tpf->ctx, &expr->node.loc, "SM4 %s rcp expression.", dst_type_string->buffer);
}
break;
case HLSL_OP1_SAT:
VKD3D_ASSERT(type_is_float(dst_type));
write_sm4_unary_op(tpf, VKD3D_SM4_OP_MOV
| (VKD3D_SM4_INSTRUCTION_FLAG_SATURATE << VKD3D_SM4_INSTRUCTION_FLAGS_SHIFT),
&expr->node, arg1, 0);
break;
case HLSL_OP1_SIN:
VKD3D_ASSERT(type_is_float(dst_type));
write_sm4_unary_op_with_two_destinations(tpf, VKD3D_SM4_OP_SINCOS, &expr->node, 0, arg1);
break;
case HLSL_OP2_DIV:
switch (dst_type->e.numeric.type)
{
case HLSL_TYPE_FLOAT:
write_sm4_binary_op(tpf, VKD3D_SM4_OP_DIV, &expr->node, arg1, arg2);
break;
case HLSL_TYPE_UINT:
write_sm4_binary_op_with_two_destinations(tpf, VKD3D_SM4_OP_UDIV, &expr->node, 0, arg1, arg2);
break;
default:
hlsl_fixme(tpf->ctx, &expr->node.loc, "SM4 %s division expression.", dst_type_string->buffer);
}
break;
case HLSL_OP2_DOT:
switch (dst_type->e.numeric.type)
{
case HLSL_TYPE_FLOAT:
switch (arg1->data_type->dimx)
{
case 4:
write_sm4_binary_op_dot(tpf, VKD3D_SM4_OP_DP4, &expr->node, arg1, arg2);
break;
case 3:
write_sm4_binary_op_dot(tpf, VKD3D_SM4_OP_DP3, &expr->node, arg1, arg2);
break;
case 2:
write_sm4_binary_op_dot(tpf, VKD3D_SM4_OP_DP2, &expr->node, arg1, arg2);
break;
case 1:
default:
vkd3d_unreachable();
}
break;
default:
hlsl_fixme(tpf->ctx, &expr->node.loc, "SM4 %s dot expression.", dst_type_string->buffer);
}
break;
case HLSL_OP2_MOD:
switch (dst_type->e.numeric.type)
{
case HLSL_TYPE_UINT:
write_sm4_binary_op_with_two_destinations(tpf, VKD3D_SM4_OP_UDIV, &expr->node, 1, arg1, arg2);
break;
default:
hlsl_fixme(tpf->ctx, &expr->node.loc, "SM4 %s modulus expression.", dst_type_string->buffer);
}
break;
case HLSL_OP2_MUL:
switch (dst_type->e.numeric.type)
{
case HLSL_TYPE_FLOAT:
write_sm4_binary_op(tpf, VKD3D_SM4_OP_MUL, &expr->node, arg1, arg2);
break;
case HLSL_TYPE_INT:
case HLSL_TYPE_UINT:
/* Using IMUL instead of UMUL because we're taking the low
* bits, and the native compiler generates IMUL. */
write_sm4_binary_op_with_two_destinations(tpf, VKD3D_SM4_OP_IMUL, &expr->node, 1, arg1, arg2);
break;
default:
hlsl_fixme(tpf->ctx, &expr->node.loc, "SM4 %s multiplication expression.", dst_type_string->buffer);
}
break;
default:
hlsl_fixme(tpf->ctx, &expr->node.loc, "SM4 %s expression.", debug_hlsl_expr_op(expr->op));
}
hlsl_release_string_buffer(tpf->ctx, dst_type_string);
}
static void write_sm4_if(struct tpf_compiler *tpf, const struct hlsl_ir_if *iff)
{
struct sm4_instruction instr =
...@@ -6031,7 +5641,17 @@ static void tpf_simple_instruction(struct tpf_compiler *tpf, const struct vkd3d_
instr.src_count = ins->src_count;
for (unsigned int i = 0; i < ins->dst_count; ++i)
{
instr.dsts[i] = ins->dst[i];
if (instr.dsts[i].modifiers & VKD3DSPDM_SATURATE)
{
/* For vsir SATURATE is a dst modifier, while for tpf it is an instruction flag. */
VKD3D_ASSERT(ins->dst_count == 1);
instr.dsts[i].modifiers &= ~VKD3DSPDM_SATURATE;
instr.extra_bits |= VKD3D_SM4_INSTRUCTION_FLAG_SATURATE << VKD3D_SM4_INSTRUCTION_FLAGS_SHIFT;
}
}
for (unsigned int i = 0; i < ins->src_count; ++i)
instr.srcs[i] = ins->src[i];
...@@ -6084,6 +5704,10 @@ static void tpf_handle_instruction(struct tpf_compiler *tpf, const struct vkd3d_
case VKD3DSIH_ADD:
case VKD3DSIH_AND:
case VKD3DSIH_DIV:
case VKD3DSIH_DP2:
case VKD3DSIH_DP3:
case VKD3DSIH_DP4:
case VKD3DSIH_DSX:
case VKD3DSIH_DSX_COARSE:
case VKD3DSIH_DSX_FINE:
...@@ -6095,18 +5719,22 @@ static void tpf_handle_instruction(struct tpf_compiler *tpf, const struct vkd3d_
case VKD3DSIH_F16TOF32:
case VKD3DSIH_F32TOF16:
case VKD3DSIH_FRC:
case VKD3DSIH_FTOI:
case VKD3DSIH_FTOU:
case VKD3DSIH_GEO:
case VKD3DSIH_IADD:
case VKD3DSIH_IEQ:
case VKD3DSIH_IGE:
case VKD3DSIH_ILT:
case VKD3DSIH_INE:
case VKD3DSIH_IMAD:
case VKD3DSIH_IMAX:
case VKD3DSIH_IMIN:
case VKD3DSIH_IMUL:
case VKD3DSIH_INE:
case VKD3DSIH_INEG:
case VKD3DSIH_ISHL:
case VKD3DSIH_ISHR:
case VKD3DSIH_ITOF:
case VKD3DSIH_LOG:
case VKD3DSIH_LTO:
case VKD3DSIH_MAD:
...@@ -6114,21 +5742,26 @@ static void tpf_handle_instruction(struct tpf_compiler *tpf, const struct vkd3d_
case VKD3DSIH_MIN:
case VKD3DSIH_MOV:
case VKD3DSIH_MOVC:
case VKD3DSIH_MUL:
case VKD3DSIH_NEU:
case VKD3DSIH_NOT:
case VKD3DSIH_OR:
case VKD3DSIH_RCP:
case VKD3DSIH_ROUND_NE:
case VKD3DSIH_ROUND_NI:
case VKD3DSIH_ROUND_PI:
case VKD3DSIH_ROUND_Z:
case VKD3DSIH_RSQ:
case VKD3DSIH_SAMPLE_INFO:
case VKD3DSIH_SINCOS:
case VKD3DSIH_SQRT:
case VKD3DSIH_UDIV:
case VKD3DSIH_UGE:
case VKD3DSIH_ULT:
case VKD3DSIH_UMAX:
case VKD3DSIH_UMIN:
case VKD3DSIH_USHR:
case VKD3DSIH_UTOF:
case VKD3DSIH_XOR:
tpf_simple_instruction(tpf, ins);
break;
...@@ -6168,10 +5801,6 @@ static void write_sm4_block(struct tpf_compiler *tpf, const struct hlsl_block *b
case HLSL_IR_CONSTANT:
vkd3d_unreachable();
case HLSL_IR_EXPR:
write_sm4_expr(tpf, hlsl_ir_expr(instr));
break;
case HLSL_IR_IF:
write_sm4_if(tpf, hlsl_ir_if(instr));
break;
...