Merge pull request #8573 from heitbaum/mesa

mesa: drop upstreamed RPi5 patches included in mesa-24.0.0
This commit is contained in:
Christian Hewitt 2024-02-01 11:37:27 +04:00 committed by GitHub
commit c166d84a00
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
2 changed files with 0 additions and 663 deletions

View File

@ -1,214 +0,0 @@
From 80050d6960a688d061eac9798c6f5f1b0eb3e960 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Alejandro=20Pi=C3=B1eiro?= <apinheiro@igalia.com>
Date: Tue, 30 Nov 2021 02:39:20 +0100
Subject: [PATCH 2/3] nir: add new opcodes to map new v71 packing/conversion
instructions
Since v71, broadcom hw include specific packing/conversion
instructions, so this commit adds opcodes to be able to make use of
them, especially for image stores:
* vftounorm8/vftosnorm8: 2x16-bit floating point to 2x8-bit
unorm/snorm
* ftounorm16/ftosnorm16: floating point to 16-bit unorm/snorm
* vftounorm10lo/vftounorm10hi: used to convert a floating point to
a r10g10b10a2 unorm
* v11fpack: packs 2 2x16 FP into R11G11B10.
* v10pack: pack 2 2x16 integer into R10G10B10A2
* v8pack: packs 2 2x16 bit integer into 4x8 bits.
* vpack: 2x32 bit to 2x16 integer pack
For the latter, it can be easily confused with the existing and general
pack_32_2x16_split. But note that this one receives two 16-bit integers,
and packs them on a 32-bit integer. But the broadcom opcode takes two 32-bit
integers, takes the lower halfwords, and packs them as 2x16 on a 32-bit
integer.
Interestingly broadcom also defines a similar one that packs the
higher halfword. Not used yet.
Reviewed-by: Iago Toral Quiroga <itoral@igalia.com>
---
src/compiler/nir/nir_constant_expressions.py | 94 ++++++++++++++++++++
src/compiler/nir/nir_opcodes.py | 52 +++++++++++
2 files changed, 146 insertions(+)
diff --git a/src/compiler/nir/nir_constant_expressions.py b/src/compiler/nir/nir_constant_expressions.py
index e6383b67737..0d0797526a9 100644
--- a/src/compiler/nir/nir_constant_expressions.py
+++ b/src/compiler/nir/nir_constant_expressions.py
@@ -62,6 +62,8 @@ template = """\
#include "util/softfloat.h"
#include "util/bigmath.h"
#include "util/format/format_utils.h"
+#include "util/format_r11g11b10f.h"
+#include "util/u_math.h"
#include "nir_constant_expressions.h"
/**
@@ -277,6 +279,98 @@ unpack_half_1x16(uint16_t u)
return _mesa_half_to_float(u);
}
+/* Broadcom v3d specific instructions */
+/**
+ * Packs 2 2x16 floating split into a r11g11b10f
+ */
+static uint32_t v11fpack_v3d(const uint32_t src0,
+ const uint32_t src1)
+{
+ float rgb[3] = {
+ unpack_half_1x16((src0 & 0xffff)),
+ unpack_half_1x16((src0 >> 16)),
+ unpack_half_1x16((src1 & 0xffff)),
+ };
+
+ return float3_to_r11g11b10f(rgb);
+}
+
+/**
+ * The three methods below are basically wrappers over pack_s/unorm_1x8/1x16,
+ * as they receive a uint16_t val instead of a float
+ */
+static inline uint8_t _mesa_half_to_snorm8(uint16_t val)
+{
+ return pack_snorm_1x8(_mesa_half_to_float(val));
+}
+
+static uint16_t _mesa_float_to_snorm16(uint32_t val)
+{
+ union fi aux;
+ aux.ui = val;
+ return pack_snorm_1x16(aux.f);
+}
+
+static uint16_t _mesa_float_to_unorm16(uint32_t val)
+{
+ union fi aux;
+ aux.ui = val;
+ return pack_unorm_1x16(aux.f);
+}
+
+static inline uint32_t float_pack16_v3d(uint32_t f32)
+{
+ return _mesa_float_to_half(uif(f32));
+}
+
+static inline uint32_t float_unpack16_v3d(uint32_t f16)
+{
+ return fui(_mesa_half_to_float(f16));
+}
+
+static inline uint32_t vfpack_v3d(uint32_t a, uint32_t b)
+{
+ return float_pack16_v3d(b) << 16 | float_pack16_v3d(a);
+}
+
+static inline uint32_t vfsat_v3d(uint32_t a)
+{
+ const uint32_t low = fui(SATURATE(_mesa_half_to_float(a & 0xffff)));
+ const uint32_t high = fui(SATURATE(_mesa_half_to_float(a >> 16)));
+
+ return vfpack_v3d(low, high);
+}
+
+static inline uint32_t fmul_v3d(uint32_t a, uint32_t b)
+{
+ return fui(uif(a) * uif(b));
+}
+
+static uint32_t vfmul_v3d(uint32_t a, uint32_t b)
+{
+ const uint32_t low = fmul_v3d(float_unpack16_v3d(a & 0xffff),
+ float_unpack16_v3d(b & 0xffff));
+ const uint32_t high = fmul_v3d(float_unpack16_v3d(a >> 16),
+ float_unpack16_v3d(b >> 16));
+
+ return vfpack_v3d(low, high);
+}
+
+/* Convert 2x16-bit floating point to 2x10-bit unorm */
+static uint32_t vftounorm10lo(uint32_t src0)
+{
+ return vfmul_v3d(vfsat_v3d(src0), 0x03ff03ff);
+}
+
+/*
+ * Convert 2x16-bit floating point to one 2-bit and one
+ * 10-bit unorm
+ */
+static uint32_t vftounorm10hi(uint32_t src0)
+{
+ return vfmul_v3d(vfsat_v3d(src0), 0x000303ff);
+}
+
/* Some typed vector structures to make things like src0.y work */
typedef int8_t int1_t;
typedef uint8_t uint1_t;
diff --git a/src/compiler/nir/nir_opcodes.py b/src/compiler/nir/nir_opcodes.py
index 0f81328f441..b70d9567cd6 100644
--- a/src/compiler/nir/nir_opcodes.py
+++ b/src/compiler/nir/nir_opcodes.py
@@ -1413,6 +1413,58 @@ for (int i = 0; i < 32; i += 8) {
}
""")
+# v3d-specific opcodes
+
+# v3d-specific (v71) instruction that packs bits of 2 2x16 floating point into
+# r11g11b10 bits, rounding to nearest even, so
+# dst[10:0] = float16_to_float11 (src0[15:0])
+# dst[21:11] = float16_to_float11 (src0[31:16])
+# dst[31:22] = float16_to_float10 (src1[15:0])
+binop_convert("v11fpack_v3d", tuint32, tuint32, "",
+ "v11fpack_v3d(src0, src1)")
+
+# v3d-specific (v71) instruction that packs 2x32 bit to 2x16 bit integer. The
+# difference with pack_32_2x16_split is that the sources are 32bit too. So it
+# receives 2 32-bit integers, and packs the lower halfwords as 2x16 on a 32-bit
+# integer.
+binop_horiz("vpack_v3d", 1, tuint32, 1, tuint32, 1, tuint32,
+ "(src0.x & 0xffff) | (src1.x << 16)")
+
+# v3d-specific (v71) instruction that packs bits of 2 2x16 integers into
+# r10g10b10a2:
+# dst[9:0] = src0[9:0]
+# dst[19:10] = src0[25:16]
+# dst[29:20] = src1[9:0]
+# dst[31:30] = src1[17:16]
+binop_convert("v10pack_v3d", tuint32, tuint32, "",
+ "(src0 & 0x3ff) | ((src0 >> 16) & 0x3ff) << 10 | (src1 & 0x3ff) << 20 | ((src1 >> 16) & 0x3ff) << 30")
+
+# v3d-specific (v71) instruction that packs 2 2x16 bit integers into 4x8 bits:
+# dst[7:0] = src0[7:0]
+# dst[15:8] = src0[23:16]
+# dst[23:16] = src1[7:0]
+# dst[31:24] = src1[23:16]
+opcode("v8pack_v3d", 0, tuint32, [0, 0], [tuint32, tuint32],
+ False, "",
+ "(src0 & 0x000000ff) | (src0 & 0x00ff0000) >> 8 | (src1 & 0x000000ff) << 16 | (src1 & 0x00ff0000) << 8")
+
+# v3d-specific (v71) instructions to convert 2x16 floating point to 2x8 bit unorm/snorm
+unop("vftounorm8_v3d", tuint32,
+ "_mesa_half_to_unorm(src0 & 0xffff, 8) | (_mesa_half_to_unorm(src0 >> 16, 8) << 16)")
+unop("vftosnorm8_v3d", tuint32,
+ "_mesa_half_to_snorm(src0 & 0xffff, 8) | (_mesa_half_to_snorm(src0 >> 16, 8) << 16)")
+
+# v3d-specific (v71) instructions to convert 32-bit floating point to 16 bit unorm/snorm
+unop("ftounorm16_v3d", tuint32, "_mesa_float_to_unorm16(src0)")
+unop("ftosnorm16_v3d", tuint32, "_mesa_float_to_snorm16(src0)")
+
+# v3d-specific (v71) instructions to convert 2x16 bit floating points to 2x10 bit unorm
+unop("vftounorm10lo_v3d", tuint32, "vftounorm10lo(src0)")
+
+# v3d-specific (v71) instructions to convert 2x16 bit floating points to one 2-bit
+# and one 10 bit unorm
+unop("vftounorm10hi_v3d", tuint32, "vftounorm10hi(src0)")
+
# Mali-specific opcodes
unop("fsat_signed_mali", tfloat, ("fmin(fmax(src0, -1.0), 1.0)"))
unop("fclamp_pos_mali", tfloat, ("fmax(src0, 0.0)"))
--
2.39.2

View File

@ -1,449 +0,0 @@
From 7e151fd3a213848c8022c9f48e10f2aec76c3e4d Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Alejandro=20Pi=C3=B1eiro?= <apinheiro@igalia.com>
Date: Thu, 2 Dec 2021 13:26:43 +0100
Subject: [PATCH 3/3] broadcom/compiler: update image store lowering to use v71
new packing/conversion instructions
Vulkan shaderdb stats with pattern dEQP-VK.image.*.with_format.*.*:
total instructions in shared programs: 35993 -> 33245 (-7.63%)
instructions in affected programs: 21153 -> 18405 (-12.99%)
helped: 394
HURT: 1
Instructions are helped.
total uniforms in shared programs: 8550 -> 7418 (-13.24%)
uniforms in affected programs: 5136 -> 4004 (-22.04%)
helped: 399
HURT: 0
Uniforms are helped.
total max-temps in shared programs: 6014 -> 5905 (-1.81%)
max-temps in affected programs: 473 -> 364 (-23.04%)
helped: 58
HURT: 0
Max-temps are helped.
total nops in shared programs: 1515 -> 1504 (-0.73%)
nops in affected programs: 46 -> 35 (-23.91%)
helped: 14
HURT: 2
Inconclusive result (%-change mean confidence interval includes 0).
FWIW, that one HURT on the instructions count is for just one
instruction.
Reviewed-by: Iago Toral Quiroga <itoral@igalia.com>
---
src/broadcom/compiler/nir_to_vir.c | 40 +++
src/broadcom/compiler/v3d_compiler.h | 16 +-
.../compiler/v3d_nir_lower_image_load_store.c | 239 +++++++++++++++++-
src/broadcom/compiler/vir.c | 2 +-
4 files changed, 288 insertions(+), 9 deletions(-)
diff --git a/src/broadcom/compiler/nir_to_vir.c b/src/broadcom/compiler/nir_to_vir.c
index 220c864a056..4329d4c85f6 100644
--- a/src/broadcom/compiler/nir_to_vir.c
+++ b/src/broadcom/compiler/nir_to_vir.c
@@ -1688,6 +1688,22 @@ ntq_emit_alu(struct v3d_compile *c, nir_alu_instr *instr)
result = vir_VFPACK(c, src[0], src[1]);
break;
+ case nir_op_vpack_v3d:
+ result = vir_VPACK(c, src[0], src[1]);
+ break;
+
+ case nir_op_v11fpack_v3d:
+ result = vir_V11FPACK(c, src[0], src[1]);
+ break;
+
+ case nir_op_v10pack_v3d:
+ result = vir_V10PACK(c, src[0], src[1]);
+ break;
+
+ case nir_op_v8pack_v3d:
+ result = vir_V8PACK(c, src[0], src[1]);
+ break;
+
case nir_op_unpack_half_2x16_split_x:
result = vir_FMOV(c, src[0]);
vir_set_unpack(c->defs[result.index], 0, V3D_QPU_UNPACK_L);
@@ -1698,6 +1714,30 @@ ntq_emit_alu(struct v3d_compile *c, nir_alu_instr *instr)
vir_set_unpack(c->defs[result.index], 0, V3D_QPU_UNPACK_H);
break;
+ case nir_op_vftounorm8_v3d:
+ result = vir_VFTOUNORM8(c, src[0]);
+ break;
+
+ case nir_op_vftosnorm8_v3d:
+ result = vir_VFTOSNORM8(c, src[0]);
+ break;
+
+ case nir_op_vftounorm10lo_v3d:
+ result = vir_VFTOUNORM10LO(c, src[0]);
+ break;
+
+ case nir_op_vftounorm10hi_v3d:
+ result = vir_VFTOUNORM10HI(c, src[0]);
+ break;
+
+ case nir_op_ftounorm16_v3d:
+ result = vir_FTOUNORM16(c, src[0]);
+ break;
+
+ case nir_op_ftosnorm16_v3d:
+ result = vir_FTOSNORM16(c, src[0]);
+ break;
+
default:
fprintf(stderr, "unknown NIR ALU inst: ");
nir_print_instr(&instr->instr, stderr);
diff --git a/src/broadcom/compiler/v3d_compiler.h b/src/broadcom/compiler/v3d_compiler.h
index 095b33c03b8..5714e85d2b8 100644
--- a/src/broadcom/compiler/v3d_compiler.h
+++ b/src/broadcom/compiler/v3d_compiler.h
@@ -1180,7 +1180,7 @@ bool v3d_nir_lower_line_smooth(nir_shader *shader);
bool v3d_nir_lower_logic_ops(nir_shader *s, struct v3d_compile *c);
bool v3d_nir_lower_scratch(nir_shader *s);
bool v3d_nir_lower_txf_ms(nir_shader *s);
-bool v3d_nir_lower_image_load_store(nir_shader *s);
+bool v3d_nir_lower_image_load_store(nir_shader *s, struct v3d_compile *c);
bool v3d_nir_lower_load_store_bitsize(nir_shader *s);
void v3d33_vir_vpm_read_setup(struct v3d_compile *c, int num_components);
@@ -1421,6 +1421,20 @@ VIR_SFU(LOG)
VIR_SFU(SIN)
VIR_SFU(RSQRT2)
+VIR_A_ALU2(VPACK)
+VIR_A_ALU2(V8PACK)
+VIR_A_ALU2(V10PACK)
+VIR_A_ALU2(V11FPACK)
+
+VIR_M_ALU1(FTOUNORM16)
+VIR_M_ALU1(FTOSNORM16)
+
+VIR_M_ALU1(VFTOUNORM8)
+VIR_M_ALU1(VFTOSNORM8)
+
+VIR_M_ALU1(VFTOUNORM10LO)
+VIR_M_ALU1(VFTOUNORM10HI)
+
static inline struct qinst *
vir_MOV_cond(struct v3d_compile *c, enum v3d_qpu_cond cond,
struct qreg dest, struct qreg src)
diff --git a/src/broadcom/compiler/v3d_nir_lower_image_load_store.c b/src/broadcom/compiler/v3d_nir_lower_image_load_store.c
index 5f8363377cb..ec43f834897 100644
--- a/src/broadcom/compiler/v3d_nir_lower_image_load_store.c
+++ b/src/broadcom/compiler/v3d_nir_lower_image_load_store.c
@@ -40,6 +40,10 @@
* calculations and load/store using the TMU general memory access path.
*/
+static const unsigned bits_8[4] = {8, 8, 8, 8};
+static const unsigned bits_16[4] = {16, 16, 16, 16};
+static const unsigned bits_1010102[4] = {10, 10, 10, 2};
+
bool
v3d_gl_format_is_return_32(enum pipe_format format)
{
@@ -59,6 +63,8 @@ v3d_gl_format_is_return_32(enum pipe_format format)
/* Packs a 32-bit vector of colors in the range [0, (1 << bits[i]) - 1] to a
* 32-bit SSA value, with as many channels as necessary to store all the bits
+ *
+ * This is the generic helper, using all common nir operations.
*/
static nir_def *
pack_bits(nir_builder *b, nir_def *color, const unsigned *bits,
@@ -91,8 +97,180 @@ pack_bits(nir_builder *b, nir_def *color, const unsigned *bits,
return nir_vec(b, results, DIV_ROUND_UP(offset, 32));
}
+/* Utility wrapper as half_2x16_split is mapped to vfpack, and sometimes it is
+ * just easier to read vfpack on the code, specially while using the PRM as
+ * reference
+ */
+static inline nir_def *
+nir_vfpack(nir_builder *b, nir_def *p1, nir_def *p2)
+{
+ return nir_pack_half_2x16_split(b, p1, p2);
+}
+
+static inline nir_def *
+pack_11f11f10f(nir_builder *b, nir_def *color)
+{
+ nir_def *p1 = nir_vfpack(b, nir_channel(b, color, 0),
+ nir_channel(b, color, 1));
+ nir_def *undef = nir_undef(b, 1, color->bit_size);
+ nir_def *p2 = nir_vfpack(b, nir_channel(b, color, 2), undef);
+
+ return nir_v11fpack_v3d(b, p1, p2);
+}
+
+static inline nir_def *
+pack_r10g10b10a2_uint(nir_builder *b, nir_def *color)
+{
+ nir_def *p1 = nir_vpack_v3d(b, nir_channel(b, color, 0),
+ nir_channel(b, color, 1));
+ nir_def *p2 = nir_vpack_v3d(b, nir_channel(b, color, 2),
+ nir_channel(b, color, 3));
+
+ return nir_v10pack_v3d(b, p1, p2);
+}
+
+static inline nir_def *
+pack_r10g10b10a2_unorm(nir_builder *b, nir_def *color)
+{
+ nir_def *p1 = nir_vfpack(b, nir_channel(b, color, 0),
+ nir_channel(b, color, 1));
+ p1 = nir_vftounorm10lo_v3d(b, p1);
+
+ nir_def *p2 = nir_vfpack(b, nir_channel(b, color, 2),
+ nir_channel(b, color, 3));
+ p2 = nir_vftounorm10hi_v3d(b, p2);
+
+ return nir_v10pack_v3d(b, p1, p2);
+}
+
+enum hw_conversion {
+ NONE,
+ TO_SNORM,
+ TO_UNORM
+};
+
+static inline nir_def *
+pack_8bit(nir_builder *b, nir_def *color,
+ unsigned num_components,
+ enum hw_conversion conversion)
+{
+ /* Note that usually you should not use this method (that relies on
+ * custom packing) for 1 component if we are not doing any
+ * conversion. But we support also that case, and let the caller
+ * decide which method to use.
+ */
+ nir_def *p1;
+ nir_def *p2;
+
+ if (conversion == NONE) {
+ p1 = nir_vpack_v3d(b, nir_channel(b, color, 0),
+ nir_channel(b, color, num_components == 1 ? 0 : 1));
+ } else {
+ p1 = nir_vfpack(b, nir_channel(b, color, 0),
+ nir_channel(b, color, num_components == 1 ? 0 : 1));
+ p1 = (conversion == TO_UNORM) ?
+ nir_vftounorm8_v3d(b, p1) : nir_vftosnorm8_v3d(b, p1);
+ }
+ if (num_components == 4) {
+ if (conversion == NONE) {
+ p2 = nir_vpack_v3d(b, nir_channel(b, color, 2),
+ nir_channel(b, color, 3));
+ } else {
+ p2 = nir_vfpack(b, nir_channel(b, color, 2),
+ nir_channel(b, color, 3));
+ p2 = (conversion == TO_UNORM) ?
+ nir_vftounorm8_v3d(b, p2) : nir_vftosnorm8_v3d(b, p2);
+ }
+ } else {
+ /* Using an undef here would be more correct. But for this
+ * case we are getting worse shader-db values with some CTS
+ * tests, so we just reuse the first packing.
+ */
+ p2 = p1;
+ }
+
+ return nir_v8pack_v3d(b, p1, p2);
+}
+
+static inline nir_def *
+pack_16bit(nir_builder *b, nir_def *color,
+ unsigned num_components,
+ enum hw_conversion conversion)
+{
+ nir_def *results[2];
+ nir_def *channels[4];
+
+ /* Note that usually you should not use this method (that relies on
+ * custom packing) if we are not doing any conversion. But we support
+ * also that case, and let the caller decide which method to use.
+ */
+
+ for (unsigned i = 0; i < num_components; i++) {
+ channels[i] = nir_channel(b, color, i);
+ switch (conversion) {
+ case TO_SNORM:
+ channels[i] = nir_ftosnorm16_v3d(b, channels[i]);
+ break;
+ case TO_UNORM:
+ channels[i] = nir_ftounorm16_v3d(b, channels[i]);
+ break;
+ default:
+ break;
+ }
+ }
+
+ switch (num_components) {
+ case 1:
+ results[0] = channels[0];
+ break;
+ case 4:
+ results[1] = nir_vpack_v3d(b, channels[2], channels[3]);
+ FALLTHROUGH;
+ case 2:
+ results[0] = nir_vpack_v3d(b, channels[0], channels[1]);
+ break;
+ }
+
+ return nir_vec(b, results, DIV_ROUND_UP(num_components, 2));
+}
+
+static inline nir_def *
+pack_xbit(nir_builder *b, nir_def *color,
+ unsigned num_components,
+ const struct util_format_channel_description *r_chan)
+{
+ bool pack_mask = (r_chan->type == UTIL_FORMAT_TYPE_SIGNED);
+ enum hw_conversion conversion = NONE;
+ if (r_chan->normalized) {
+ conversion =
+ (r_chan->type == UTIL_FORMAT_TYPE_UNSIGNED) ? TO_UNORM : TO_SNORM;
+ }
+
+ switch (r_chan->size) {
+ case 8:
+ if (conversion == NONE && num_components < 2)
+ return pack_bits(b, color, bits_8, num_components, pack_mask);
+ else
+ return pack_8bit(b, color, num_components, conversion);
+ break;
+ case 16:
+ /* pack_mask implies that the generic packing method would
+ * need to include extra operations to handle negative values,
+ * so in that case, even without a conversion, it is better to
+ * use the packing using custom hw operations.
+ */
+ if (conversion == NONE && !pack_mask)
+ return pack_bits(b, color, bits_16, num_components, pack_mask);
+ else
+ return pack_16bit(b, color, num_components, conversion);
+ break;
+ default:
+ unreachable("unrecognized bits");
+ }
+}
+
static bool
-v3d_nir_lower_image_store(nir_builder *b, nir_intrinsic_instr *instr)
+v3d_nir_lower_image_store_v42(nir_builder *b, nir_intrinsic_instr *instr)
{
enum pipe_format format = nir_intrinsic_format(instr);
assert(format != PIPE_FORMAT_NONE);
@@ -118,9 +296,6 @@ v3d_nir_lower_image_store(nir_builder *b, nir_intrinsic_instr *instr)
*/
formatted = color;
} else {
- static const unsigned bits_8[4] = {8, 8, 8, 8};
- static const unsigned bits_16[4] = {16, 16, 16, 16};
- static const unsigned bits_1010102[4] = {10, 10, 10, 2};
const unsigned *bits;
switch (r_chan->size) {
@@ -170,6 +345,50 @@ v3d_nir_lower_image_store(nir_builder *b, nir_intrinsic_instr *instr)
return true;
}
+
+static bool
+v3d_nir_lower_image_store_v71(nir_builder *b, nir_intrinsic_instr *instr)
+{
+ enum pipe_format format = nir_intrinsic_format(instr);
+ assert(format != PIPE_FORMAT_NONE);
+ const struct util_format_description *desc =
+ util_format_description(format);
+ const struct util_format_channel_description *r_chan = &desc->channel[0];
+ unsigned num_components = util_format_get_nr_components(format);
+ b->cursor = nir_before_instr(&instr->instr);
+
+ nir_def *color =
+ nir_trim_vector(b, instr->src[3].ssa, num_components);
+ nir_def *formatted = NULL;
+ if (format == PIPE_FORMAT_R9G9B9E5_FLOAT) {
+ formatted = nir_format_pack_r9g9b9e5(b, color);
+ } else if (format == PIPE_FORMAT_R11G11B10_FLOAT) {
+ formatted = pack_11f11f10f(b, color);
+ } else if (format == PIPE_FORMAT_R10G10B10A2_UINT) {
+ formatted = pack_r10g10b10a2_uint(b, color);
+ } else if (format == PIPE_FORMAT_R10G10B10A2_UNORM) {
+ formatted = pack_r10g10b10a2_unorm(b, color);
+ } else if (r_chan->size == 32) {
+ /* For 32-bit formats, we just have to move the vector
+ * across (possibly reducing the number of channels).
+ */
+ formatted = color;
+ } else if (r_chan->type == UTIL_FORMAT_TYPE_FLOAT) {
+ assert(r_chan->size == 16);
+ formatted = nir_format_float_to_half(b, color);
+ formatted = pack_bits(b, formatted, bits_16, num_components,
+ false);
+ } else {
+ assert(r_chan->size == 8 || r_chan->size == 16);
+ formatted = pack_xbit(b, color, num_components, r_chan);
+ }
+
+ nir_src_rewrite(&instr->src[3], formatted);
+ instr->num_components = formatted->num_components;
+
+ return true;
+}
+
static bool
v3d_nir_lower_image_load(nir_builder *b, nir_intrinsic_instr *instr)
{
@@ -207,11 +426,17 @@ v3d_nir_lower_image_load_store_cb(nir_builder *b,
nir_intrinsic_instr *intr,
void *_state)
{
+ struct v3d_compile *c = (struct v3d_compile *) _state;
+
switch (intr->intrinsic) {
case nir_intrinsic_image_load:
return v3d_nir_lower_image_load(b, intr);
case nir_intrinsic_image_store:
- return v3d_nir_lower_image_store(b, intr);
+ if (c->devinfo->ver >= 71)
+ return v3d_nir_lower_image_store_v71(b, intr);
+ else
+ return v3d_nir_lower_image_store_v42(b, intr);
+ break;
default:
return false;
}
@@ -220,10 +445,10 @@ v3d_nir_lower_image_load_store_cb(nir_builder *b,
}
bool
-v3d_nir_lower_image_load_store(nir_shader *s)
+v3d_nir_lower_image_load_store(nir_shader *s, struct v3d_compile *c)
{
return nir_shader_intrinsics_pass(s,
v3d_nir_lower_image_load_store_cb,
nir_metadata_block_index |
- nir_metadata_dominance, NULL);
+ nir_metadata_dominance, c);
}
diff --git a/src/broadcom/compiler/vir.c b/src/broadcom/compiler/vir.c
index 8c536b8fbcc..acb13a6cbf9 100644
--- a/src/broadcom/compiler/vir.c
+++ b/src/broadcom/compiler/vir.c
@@ -1599,7 +1599,7 @@ v3d_attempt_compile(struct v3d_compile *c)
NIR_PASS(_, c->s, v3d_nir_lower_io, c);
NIR_PASS(_, c->s, v3d_nir_lower_txf_ms);
- NIR_PASS(_, c->s, v3d_nir_lower_image_load_store);
+ NIR_PASS(_, c->s, v3d_nir_lower_image_load_store, c);
NIR_PASS(_, c->s, nir_opt_idiv_const, 8);
nir_lower_idiv_options idiv_options = {
--
2.39.2