Stars: 1 · Forks: 0

lavc/flacdsp: R-V V LPC16 function

In this case, the inner loop computing the scalar product can be reduced
to just one multiplication and one sum even with 128-bit vectors. The
result is a lot simpler, but also brings more modest performance gains:

flac_lpc_16_13_c:       15241.0
flac_lpc_16_13_rvv_i32: 11230.0
flac_lpc_16_16_c:       17884.0
flac_lpc_16_16_rvv_i32: 12125.7
flac_lpc_16_29_c:       27847.7
flac_lpc_16_29_rvv_i32: 10494.0
flac_lpc_16_32_c:       30051.5
flac_lpc_16_32_rvv_i32: 10355.0
This commit is contained in:
Rémi Denis-Courmont 2023-11-15 21:31:17 +02:00
parent 295092b46d
commit ca664f2254
2 changed files with 35 additions and 5 deletions

View File

@@ -25,6 +25,8 @@
#include "libavutil/riscv/cpu.h"
#include "libavcodec/flacdsp.h"
void ff_flac_lpc16_rvv(int32_t *decoded, const int coeffs[32],
int pred_order, int qlevel, int len);
void ff_flac_lpc32_rvv(int32_t *decoded, const int coeffs[32],
int pred_order, int qlevel, int len);
void ff_flac_lpc32_rvv_simple(int32_t *decoded, const int coeffs[32],
@@ -61,16 +63,20 @@ void ff_flac_decorrelate_ms_32_rvv(uint8_t **out, int32_t **in,
av_cold void ff_flacdsp_init_riscv(FLACDSPContext *c, enum AVSampleFormat fmt,
int channels)
{
#if HAVE_RVV && (__riscv_xlen >= 64)
#if HAVE_RVV
int flags = av_get_cpu_flags();
if ((flags & AV_CPU_FLAG_RVV_I32) && (flags & AV_CPU_FLAG_RVB_ADDR)) {
int vlenb = ff_get_rv_vlenb();
if (vlenb == 16)
c->lpc32 = ff_flac_lpc32_rvv;
else if (vlenb > 16)
c->lpc32 = ff_flac_lpc32_rvv_simple;
if (vlenb >= 16) {
c->lpc16 = ff_flac_lpc16_rvv;
# if (__riscv_xlen >= 64)
if (vlenb > 16)
c->lpc32 = ff_flac_lpc32_rvv_simple;
else
c->lpc32 = ff_flac_lpc32_rvv;
}
switch (fmt) {
case AV_SAMPLE_FMT_S16:
@@ -111,6 +117,7 @@ av_cold void ff_flacdsp_init_riscv(FLACDSPContext *c, enum AVSampleFormat fmt,
c->decorrelate[2] = ff_flac_decorrelate_rs_32_rvv;
c->decorrelate[3] = ff_flac_decorrelate_ms_32_rvv;
break;
# endif
}
}
#endif

View File

@@ -20,6 +20,29 @@
#include "libavutil/riscv/asm.S"
// FLAC LPC reconstruction for 16-bit-sample streams, RISC-V Vector (Zve32x).
// C prototype (see the init-file diff above):
//   void ff_flac_lpc16_rvv(int32_t *decoded,        // a0
//                          const int coeffs[32],    // a1
//                          int pred_order,          // a2
//                          int qlevel,              // a3
//                          int len);                // a4
// The whole coefficient set and the sliding history window fit in one
// LMUL=8 vector group (pred_order <= 32, and the init code only selects
// this function when VLENB >= 16, i.e. at least 128-bit vectors), so each
// output sample needs just one vector multiply plus one reduction.
// NOTE(review): products are accumulated in 32-bit lanes; presumably safe
// from overflow only for the 16-bit sample path — confirm against the C
// reference in flacdsp.
func ff_flac_lpc16_rvv, zve32x
vsetvli zero, a2, e32, m8, ta, ma   // VL = pred_order, 32-bit elems, LMUL=8
vle32.v v8, (a1)                    // v8  = coeffs[0 .. pred_order-1]
sub a4, a4, a2                      // a4  = number of samples left to predict
vle32.v v16, (a0)                   // v16 = history: decoded[0 .. pred_order-1]
sh2add a0, a2, a0                   // a0  = &decoded[pred_order] (first output)
vmv.s.x v0, zero                    // v0[0] = 0: seed for the sum reduction
1:
vmul.vv v24, v8, v16                // element-wise coeffs * history
lw t0, (a0)                         // t0 = residual currently stored at *a0
vredsum.vs v24, v24, v0             // v24[0] = sum of all products (+ 0)
addi a4, a4, -1                     // one fewer sample remaining
vmv.x.s t1, v24                     // t1 = raw prediction (pre-shift)
sra t1, t1, a3                      // apply quantization shift: t1 >>= qlevel
add t0, t0, t1                      // reconstructed sample = residual + prediction
vslide1down.vx v16, v16, t0         // drop oldest history elem, append new sample
sw t0, (a0)                         // store reconstructed sample back
addi a0, a0, 4                      // advance output pointer (sizeof(int32_t))
bnez a4, 1b
ret
endfunc
#if (__riscv_xlen == 64)
func ff_flac_lpc32_rvv, zve32x
addi t2, a2, -16