1
0

aacdec: remove AAC-specific MIPS optimizations

The code was written in 2012, but seems to have been broken
for just as long. Compilation is broken on every MIPS/MIPS64
system with an FPU (which the code depends on).
This commit is contained in:
Lynne 2024-03-21 08:23:55 +01:00
parent ae7c6cc17d
commit 03cf101645
No known key found for this signature in database
GPG Key ID: A2FEA5F03F034464
18 changed files with 1 addition and 3494 deletions

View File

@ -345,8 +345,6 @@ extern const AACDecDSP aac_dsp_fixed;
extern const AACDecProc aac_proc;
extern const AACDecProc aac_proc_fixed;
void ff_aacdec_init_mips(AACDecContext *c);
int ff_aac_decode_ics(AACDecContext *ac, SingleChannelElement *sce,
GetBitContext *gb, int common_window, int scale_flag);

View File

@ -74,10 +74,6 @@ static int init(AACDecContext *ac)
ff_aac_float_common_init();
#if ARCH_MIPS
ff_aacdec_init_mips(ac);
#endif
return 0;
}

View File

@ -54,7 +54,6 @@ typedef struct PSDSPContext {
void AAC_RENAME(ff_psdsp_init)(PSDSPContext *s);
void ff_psdsp_init_arm(PSDSPContext *s);
void ff_psdsp_init_aarch64(PSDSPContext *s);
void ff_psdsp_init_mips(PSDSPContext *s);
void ff_psdsp_init_riscv(PSDSPContext *s);
void ff_psdsp_init_x86(PSDSPContext *s);

View File

@ -226,8 +226,6 @@ av_cold void AAC_RENAME(ff_psdsp_init)(PSDSPContext *s)
ff_psdsp_init_arm(s);
#elif ARCH_AARCH64
ff_psdsp_init_aarch64(s);
#elif ARCH_MIPS
ff_psdsp_init_mips(s);
#elif ARCH_RISCV
ff_psdsp_init_riscv(s);
#elif ARCH_X86

View File

@ -223,10 +223,6 @@ static const float psy_fir_coeffs[] = {
-5.52212e-17 * 2, -0.313819 * 2
};
#if ARCH_MIPS
# include "mips/aacpsy_mips.h"
#endif /* ARCH_MIPS */
/**
* Calculate the ABR attack threshold from the above LAME psymodel table.
*/

View File

@ -43,10 +43,6 @@
#include <float.h>
#include <math.h>
#if ARCH_MIPS
#include "mips/aacsbr_mips.h"
#endif /* ARCH_MIPS */
/**
* 2^(x) for integer x
* @return correctly rounded float

View File

@ -1579,10 +1579,4 @@ static void aacsbr_func_ptr_init(AACSBRContext *c)
c->sbr_hf_assemble = sbr_hf_assemble;
c->sbr_x_gen = sbr_x_gen;
c->sbr_hf_inverse_filter = sbr_hf_inverse_filter;
#if !USE_FIXED
#if ARCH_MIPS
ff_aacsbr_func_ptr_init_mips(c);
#endif
#endif
}

View File

@ -1,5 +1,4 @@
ARCH_HEADERS = aacsbr_mips.h aacpsy_mips.h \
cabac.h compute_antialias_fixed.h \
ARCH_HEADERS = cabac.h compute_antialias_fixed.h \
compute_antialias_float.h \
MIPSFPU-OBJS-$(CONFIG_AMRNB_DECODER) += mips/acelp_filters_mips.o \
@ -15,10 +14,6 @@ MIPSFPU-OBJS-$(CONFIG_MPEGAUDIODSP) += mips/mpegaudiodsp_mips_float.o
MIPSDSP-OBJS-$(CONFIG_MPEGAUDIODSP) += mips/mpegaudiodsp_mips_fixed.o
MIPSFPU-OBJS-$(CONFIG_FMTCONVERT) += mips/fmtconvert_mips.o
OBJS-$(CONFIG_AC3DSP) += mips/ac3dsp_mips.o
OBJS-$(CONFIG_AAC_DECODER) += mips/aacdec_mips.o \
mips/aacsbr_mips.o \
mips/sbrdsp_mips.o \
mips/aacpsdsp_mips.o
MIPSFPU-OBJS-$(CONFIG_AAC_ENCODER) += mips/iirfilter_mips.o
OBJS-$(CONFIG_HEVC_DECODER) += mips/hevcdsp_init_mips.o \
mips/hevcpred_init_mips.o

View File

@ -1,471 +0,0 @@
/*
* Copyright (c) 2012
* MIPS Technologies, Inc., California.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the MIPS Technologies, Inc., nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE MIPS TECHNOLOGIES, INC. ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE MIPS TECHNOLOGIES, INC. BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* Authors: Darko Laus (darko@mips.com)
* Djordje Pesut (djordje@mips.com)
* Mirjana Vulin (mvulin@mips.com)
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* Reference: libavcodec/aacdec.c
*/
#include "libavutil/attributes.h"
#include "libavcodec/aacdec.h"
#include "aacdec_mips.h"
#include "libavcodec/aactab.h"
#include "libavcodec/sinewin.h"
#include "libavutil/mips/asmdefs.h"
#if HAVE_INLINE_ASM
#if HAVE_MIPSFPU
/**
 * Block-copy 'count' floats from src to dst.
 * The words are moved through integer registers (lw/sw), not the FPU,
 * so the bit patterns are transferred untouched with no FP latency.
 * Preconditions: count > 0, count is a multiple of 8 (checked by
 * av_assert2), and the src/dst ranges do not overlap.
 */
static av_always_inline void float_copy(float *dst, const float *src, int count)
{
// Copy 'count' floats from src to dst
const float *loop_end = src + count;
int temp[8];
// count must be a multiple of 8
av_assert2(count % 8 == 0);
// loop unrolled 8 times
__asm__ volatile (
".set push \n\t"
".set noreorder \n\t"
"1: \n\t"
"lw %[temp0], 0(%[src]) \n\t"
"lw %[temp1], 4(%[src]) \n\t"
"lw %[temp2], 8(%[src]) \n\t"
"lw %[temp3], 12(%[src]) \n\t"
"lw %[temp4], 16(%[src]) \n\t"
"lw %[temp5], 20(%[src]) \n\t"
"lw %[temp6], 24(%[src]) \n\t"
"lw %[temp7], 28(%[src]) \n\t"
PTR_ADDIU "%[src], %[src], 32 \n\t"
"sw %[temp0], 0(%[dst]) \n\t"
"sw %[temp1], 4(%[dst]) \n\t"
"sw %[temp2], 8(%[dst]) \n\t"
"sw %[temp3], 12(%[dst]) \n\t"
"sw %[temp4], 16(%[dst]) \n\t"
"sw %[temp5], 20(%[dst]) \n\t"
"sw %[temp6], 24(%[dst]) \n\t"
"sw %[temp7], 28(%[dst]) \n\t"
/* branch-delay slot: the dst increment executes with the branch */
"bne %[src], %[loop_end], 1b \n\t"
PTR_ADDIU "%[dst], %[dst], 32 \n\t"
".set pop \n\t"
: [temp0]"=&r"(temp[0]), [temp1]"=&r"(temp[1]),
[temp2]"=&r"(temp[2]), [temp3]"=&r"(temp[3]),
[temp4]"=&r"(temp[4]), [temp5]"=&r"(temp[5]),
[temp6]"=&r"(temp[6]), [temp7]"=&r"(temp[7]),
[src]"+r"(src), [dst]"+r"(dst)
: [loop_end]"r"(loop_end)
: "memory"
);
}
/**
 * Advance a 32-bit linear congruential generator by one step
 * (Numerical Recipes constants: a = 1664525, c = 1013904223).
 * The multiply/add wraps modulo 2^32 in unsigned arithmetic; the
 * result is then reinterpreted as a signed int via a union, which
 * is well-defined type punning in C.
 */
static av_always_inline int lcg_random(unsigned previous_val)
{
    union { unsigned u; int s; } next;

    next.u = 1664525u * previous_val + 1013904223u;
    return next.s;
}
/**
 * Inverse MDCT + overlap-add windowing for one single channel element
 * (MIPS-tuned replacement for the generic imdct_and_windowing).
 * Transforms sce->coeffs into sce->output and refreshes sce->saved
 * (the 512/1024-sample overlap buffer) for the next frame.
 */
static void imdct_and_windowing_mips(AACDecContext *ac, SingleChannelElement *sce)
{
IndividualChannelStream *ics = &sce->ics;
float *in = sce->coeffs;
float *out = sce->output;
float *saved = sce->saved;
const float *swindow = ics->use_kb_window[0] ? ff_aac_kbd_short_128 : ff_sine_128;
const float *lwindow_prev = ics->use_kb_window[1] ? ff_aac_kbd_long_1024 : ff_sine_1024;
const float *swindow_prev = ics->use_kb_window[1] ? ff_aac_kbd_short_128 : ff_sine_128;
float *buf = ac->buf_mdct;
int i;
/* Eight 128-point transforms for short windows, one 1024-point otherwise. */
if (ics->window_sequence[0] == EIGHT_SHORT_SEQUENCE) {
for (i = 0; i < 1024; i += 128)
ac->mdct128_fn(ac->mdct128, buf + i, in + i, sizeof(float));
} else
ac->mdct1024_fn(ac->mdct1024, buf, in, sizeof(float));
/* window overlapping
* NOTE: To simplify the overlapping code, all 'meaningless' short to long
* and long to short transitions are considered to be short to short
* transitions. This leaves just two cases (long to long and short to short)
* with a little special sauce for EIGHT_SHORT_SEQUENCE.
*/
if ((ics->window_sequence[1] == ONLY_LONG_SEQUENCE || ics->window_sequence[1] == LONG_STOP_SEQUENCE) &&
(ics->window_sequence[0] == ONLY_LONG_SEQUENCE || ics->window_sequence[0] == LONG_START_SEQUENCE)) {
ac->fdsp->vector_fmul_window( out, saved, buf, lwindow_prev, 512);
} else {
float_copy(out, saved, 448);
if (ics->window_sequence[0] == EIGHT_SHORT_SEQUENCE) {
{
/* Fused scalar loop: overlaps the five inner short windows in a
* single 64-iteration pass instead of five vector_fmul_window
* calls. dst0/dst1 walk toward each other inside each 128-sample
* window; the final overlap spills into 'saved' via dst2. */
float wi;
float wj;
int i;
float temp0, temp1, temp2, temp3;
float *dst0 = out + 448 + 0*128;
float *dst1 = dst0 + 64 + 63;
float *dst2 = saved + 63;
float *win0 = (float*)swindow;
float *win1 = win0 + 64 + 63;
float *win0_prev = (float*)swindow_prev;
float *win1_prev = win0_prev + 64 + 63;
float *src0_prev = saved + 448;
float *src1_prev = buf + 0*128 + 63;
float *src0 = buf + 0*128 + 64;
float *src1 = buf + 1*128 + 63;
for(i = 0; i < 64; i++)
{
temp0 = src0_prev[0];
temp1 = src1_prev[0];
wi = *win0_prev;
wj = *win1_prev;
/* First overlap uses the previous frame's window shape. */
temp2 = src0[0];
temp3 = src1[0];
dst0[0] = temp0 * wj - temp1 * wi;
dst1[0] = temp0 * wi + temp1 * wj;
/* Remaining overlaps all use the current short window. */
wi = *win0;
wj = *win1;
temp0 = src0[128];
temp1 = src1[128];
dst0[128] = temp2 * wj - temp3 * wi;
dst1[128] = temp2 * wi + temp3 * wj;
temp2 = src0[256];
temp3 = src1[256];
dst0[256] = temp0 * wj - temp1 * wi;
dst1[256] = temp0 * wi + temp1 * wj;
dst0[384] = temp2 * wj - temp3 * wi;
dst1[384] = temp2 * wi + temp3 * wj;
temp0 = src0[384];
temp1 = src1[384];
dst0[512] = temp0 * wj - temp1 * wi;
dst2[0] = temp0 * wi + temp1 * wj;
src0++;
src1--;
src0_prev++;
src1_prev--;
win0++;
win1--;
win0_prev++;
win1_prev--;
dst0++;
dst1--;
dst2--;
}
}
} else {
ac->fdsp->vector_fmul_window(out + 448, saved + 448, buf, swindow_prev, 64);
float_copy(out + 576, buf + 64, 448);
}
}
// buffer update
if (ics->window_sequence[0] == EIGHT_SHORT_SEQUENCE) {
ac->fdsp->vector_fmul_window(saved + 64, buf + 4*128 + 64, buf + 5*128, swindow, 64);
ac->fdsp->vector_fmul_window(saved + 192, buf + 5*128 + 64, buf + 6*128, swindow, 64);
ac->fdsp->vector_fmul_window(saved + 320, buf + 6*128 + 64, buf + 7*128, swindow, 64);
float_copy(saved + 448, buf + 7*128 + 64, 64);
} else if (ics->window_sequence[0] == LONG_START_SEQUENCE) {
float_copy(saved, buf + 512, 448);
float_copy(saved + 448, buf + 7*128 + 64, 64);
} else { // LONG_STOP or ONLY_LONG
float_copy(saved, buf + 512, 512);
}
}
/**
 * Apply windowing and MDCT to obtain the spectral
 * coefficient from the predicted sample by LTP.
 *
 * Fix: the four window-table ternaries had a stray ')' after each
 * ff_aac_kbd_* identifier, which made the file fail to compile.
 * sizeof(INTFLOAT) is also replaced by sizeof(float): this file is
 * float-only, so the value is identical while dropping the dependency
 * on the INTFLOAT macro.
 */
static inline void windowing_and_mdct_ltp(AACDecContext *ac,
                                          float *out, float *in,
                                          IndividualChannelStream *ics)
{
    /* Index 0 selects the current window shape, index 1 the previous
     * one; KBD vs. sine per the ICS flags. */
    const float *lwindow      = ics->use_kb_window[0] ? ff_aac_kbd_long_1024 : ff_sine_1024;
    const float *swindow      = ics->use_kb_window[0] ? ff_aac_kbd_short_128 : ff_sine_128;
    const float *lwindow_prev = ics->use_kb_window[1] ? ff_aac_kbd_long_1024 : ff_sine_1024;
    const float *swindow_prev = ics->use_kb_window[1] ? ff_aac_kbd_short_128 : ff_sine_128;

    /* First half: window with the previous frame's shape. */
    if (ics->window_sequence[0] != LONG_STOP_SEQUENCE) {
        ac->fdsp->vector_fmul(in, in, lwindow_prev, 1024);
    } else {
        memset(in, 0, 448 * sizeof(*in));
        ac->fdsp->vector_fmul(in + 448, in + 448, swindow_prev, 128);
    }
    /* Second half: window (reversed) with the current shape. */
    if (ics->window_sequence[0] != LONG_START_SEQUENCE) {
        ac->fdsp->vector_fmul_reverse(in + 1024, in + 1024, lwindow, 1024);
    } else {
        ac->fdsp->vector_fmul_reverse(in + 1024 + 448, in + 1024 + 448, swindow, 128);
        memset(in + 1024 + 576, 0, 448 * sizeof(*in));
    }
    ac->mdct_ltp_fn(ac->mdct_ltp, out, in, sizeof(float));
}
/**
 * Apply long-term prediction to one channel (long windows only).
 * Builds the predicted time signal from ltp_state scaled by the LTP
 * coefficient, zero-pads the tail with unrolled sw-$0 stores, runs the
 * windowed MDCT (and TNS if present), then adds the predicted spectrum
 * into sce->coeffs for the scalefactor bands flagged in ltp->used.
 */
static void apply_ltp_mips(AACDecContext *ac, SingleChannelElement *sce)
{
const LongTermPrediction *ltp = &sce->ics.ltp;
const uint16_t *offsets = sce->ics.swb_offset;
int i, sfb;
int j, k;
if (sce->ics.window_sequence[0] != EIGHT_SHORT_SEQUENCE) {
float *predTime = sce->output;
float *predFreq = ac->buf_mdct;
float *p_predTime;
int16_t num_samples = 2048;
if (ltp->lag < 1024)
num_samples = ltp->lag + 1024;
/* j = number of 4-word zero-store groups, k = remainder (0..3) */
j = (2048 - num_samples) >> 2;
k = (2048 - num_samples) & 3;
p_predTime = &predTime[num_samples];
for (i = 0; i < num_samples; i++)
predTime[i] = sce->ltp_state[i + 2048 - ltp->lag] * ltp->coef;
for (i = 0; i < j; i++) {
/* loop unrolled 4 times */
__asm__ volatile (
"sw $0, 0(%[p_predTime]) \n\t"
"sw $0, 4(%[p_predTime]) \n\t"
"sw $0, 8(%[p_predTime]) \n\t"
"sw $0, 12(%[p_predTime]) \n\t"
PTR_ADDIU "%[p_predTime], %[p_predTime], 16 \n\t"
: [p_predTime]"+r"(p_predTime)
:
: "memory"
);
}
for (i = 0; i < k; i++) {
__asm__ volatile (
"sw $0, 0(%[p_predTime]) \n\t"
PTR_ADDIU "%[p_predTime], %[p_predTime], 4 \n\t"
: [p_predTime]"+r"(p_predTime)
:
: "memory"
);
}
windowing_and_mdct_ltp(ac, predFreq, predTime, &sce->ics);
if (sce->tns.present)
ac->dsp.apply_tns(predFreq, &sce->tns, &sce->ics, 0);
/* Accumulate prediction only for bands where LTP is enabled. */
for (sfb = 0; sfb < FFMIN(sce->ics.max_sfb, MAX_LTP_LONG_SFB); sfb++)
if (ltp->used[sfb])
for (i = offsets[sfb]; i < offsets[sfb + 1]; i++)
sce->coeffs[i] += predFreq[i];
}
}
/**
 * dst[i] = src0[count-1-i] * src1[count-1-i] for i in [0, count).
 * Element-wise multiply with the result written out reversed, done
 * through FPU registers (lwc1/mul.s/swc1), 4 elements per iteration.
 * count must be a positive multiple of 4 (checked by av_assert2).
 */
static av_always_inline void fmul_and_reverse(float *dst, const float *src0, const float *src1, int count)
{
/* Multiply 'count' floats in src0 by src1 and store the results in dst in reverse */
/* This should be equivalent to a normal fmul, followed by reversing dst */
// count must be a multiple of 4
av_assert2(count % 4 == 0);
// move src0 and src1 to the last element of their arrays
src0 += count - 1;
src1 += count - 1;
for (; count > 0; count -= 4){
float temp[12];
/* loop unrolled 4 times */
__asm__ volatile (
"lwc1 %[temp0], 0(%[ptr2]) \n\t"
"lwc1 %[temp1], -4(%[ptr2]) \n\t"
"lwc1 %[temp2], -8(%[ptr2]) \n\t"
"lwc1 %[temp3], -12(%[ptr2]) \n\t"
"lwc1 %[temp4], 0(%[ptr3]) \n\t"
"lwc1 %[temp5], -4(%[ptr3]) \n\t"
"lwc1 %[temp6], -8(%[ptr3]) \n\t"
"lwc1 %[temp7], -12(%[ptr3]) \n\t"
"mul.s %[temp8], %[temp0], %[temp4] \n\t"
"mul.s %[temp9], %[temp1], %[temp5] \n\t"
"mul.s %[temp10], %[temp2], %[temp6] \n\t"
"mul.s %[temp11], %[temp3], %[temp7] \n\t"
"swc1 %[temp8], 0(%[ptr1]) \n\t"
"swc1 %[temp9], 4(%[ptr1]) \n\t"
"swc1 %[temp10], 8(%[ptr1]) \n\t"
"swc1 %[temp11], 12(%[ptr1]) \n\t"
PTR_ADDIU "%[ptr1], %[ptr1], 16 \n\t"
PTR_ADDIU "%[ptr2], %[ptr2], -16 \n\t"
PTR_ADDIU "%[ptr3], %[ptr3], -16 \n\t"
: [temp0]"=&f"(temp[0]), [temp1]"=&f"(temp[1]),
[temp2]"=&f"(temp[2]), [temp3]"=&f"(temp[3]),
[temp4]"=&f"(temp[4]), [temp5]"=&f"(temp[5]),
[temp6]"=&f"(temp[6]), [temp7]"=&f"(temp[7]),
[temp8]"=&f"(temp[8]), [temp9]"=&f"(temp[9]),
[temp10]"=&f"(temp[10]), [temp11]"=&f"(temp[11]),
[ptr1]"+r"(dst), [ptr2]"+r"(src0), [ptr3]"+r"(src1)
:
: "memory"
);
}
}
/**
 * Refresh the 3*1024-sample LTP history (sce->ltp_state) after a frame.
 * sce->coeffs is reused as scratch ('saved_ltp') to assemble the
 * windowed reconstruction tail, then the history is shifted by 1024
 * samples and the new output/tail appended.
 */
static void update_ltp_mips(AACDecContext *ac, SingleChannelElement *sce)
{
IndividualChannelStream *ics = &sce->ics;
float *saved = sce->saved;
float *saved_ltp = sce->coeffs;
const float *lwindow = ics->use_kb_window[0] ? ff_aac_kbd_long_1024 : ff_sine_1024;
const float *swindow = ics->use_kb_window[0] ? ff_aac_kbd_short_128 : ff_sine_128;
uint32_t temp0, temp1, temp2, temp3, temp4, temp5, temp6, temp7;
if (ics->window_sequence[0] == EIGHT_SHORT_SEQUENCE) {
/* Copy the 512 overlap samples, then zero saved_ltp[576..1023]
* with an unrolled sw-$0 loop (448 floats = 14 iterations of 8). */
float *p_saved_ltp = saved_ltp + 576;
float *loop_end1 = p_saved_ltp + 448;
float_copy(saved_ltp, saved, 512);
/* loop unrolled 8 times */
__asm__ volatile (
"1: \n\t"
"sw $0, 0(%[p_saved_ltp]) \n\t"
"sw $0, 4(%[p_saved_ltp]) \n\t"
"sw $0, 8(%[p_saved_ltp]) \n\t"
"sw $0, 12(%[p_saved_ltp]) \n\t"
"sw $0, 16(%[p_saved_ltp]) \n\t"
"sw $0, 20(%[p_saved_ltp]) \n\t"
"sw $0, 24(%[p_saved_ltp]) \n\t"
"sw $0, 28(%[p_saved_ltp]) \n\t"
PTR_ADDIU "%[p_saved_ltp],%[p_saved_ltp], 32 \n\t"
"bne %[p_saved_ltp], %[loop_end1], 1b \n\t"
: [p_saved_ltp]"+r"(p_saved_ltp)
: [loop_end1]"r"(loop_end1)
: "memory"
);
ac->fdsp->vector_fmul_reverse(saved_ltp + 448, ac->buf_mdct + 960, &swindow[64], 64);
fmul_and_reverse(saved_ltp + 512, ac->buf_mdct + 960, swindow, 64);
} else if (ics->window_sequence[0] == LONG_START_SEQUENCE) {
/* Fused loop: copies saved[0..447] to saved_ltp and simultaneously
* zeroes saved_ltp[576..1023] via the +2304-byte store offsets
* (2304 bytes = 576 floats past the current dst pointer). */
float *buff0 = saved;
float *buff1 = saved_ltp;
float *loop_end = saved + 448;
/* loop unrolled 8 times */
__asm__ volatile (
".set push \n\t"
".set noreorder \n\t"
"1: \n\t"
"lw %[temp0], 0(%[src]) \n\t"
"lw %[temp1], 4(%[src]) \n\t"
"lw %[temp2], 8(%[src]) \n\t"
"lw %[temp3], 12(%[src]) \n\t"
"lw %[temp4], 16(%[src]) \n\t"
"lw %[temp5], 20(%[src]) \n\t"
"lw %[temp6], 24(%[src]) \n\t"
"lw %[temp7], 28(%[src]) \n\t"
PTR_ADDIU "%[src], %[src], 32 \n\t"
"sw %[temp0], 0(%[dst]) \n\t"
"sw %[temp1], 4(%[dst]) \n\t"
"sw %[temp2], 8(%[dst]) \n\t"
"sw %[temp3], 12(%[dst]) \n\t"
"sw %[temp4], 16(%[dst]) \n\t"
"sw %[temp5], 20(%[dst]) \n\t"
"sw %[temp6], 24(%[dst]) \n\t"
"sw %[temp7], 28(%[dst]) \n\t"
"sw $0, 2304(%[dst]) \n\t"
"sw $0, 2308(%[dst]) \n\t"
"sw $0, 2312(%[dst]) \n\t"
"sw $0, 2316(%[dst]) \n\t"
"sw $0, 2320(%[dst]) \n\t"
"sw $0, 2324(%[dst]) \n\t"
"sw $0, 2328(%[dst]) \n\t"
"sw $0, 2332(%[dst]) \n\t"
"bne %[src], %[loop_end], 1b \n\t"
PTR_ADDIU "%[dst], %[dst], 32 \n\t"
".set pop \n\t"
: [temp0]"=&r"(temp0), [temp1]"=&r"(temp1),
[temp2]"=&r"(temp2), [temp3]"=&r"(temp3),
[temp4]"=&r"(temp4), [temp5]"=&r"(temp5),
[temp6]"=&r"(temp6), [temp7]"=&r"(temp7),
[src]"+r"(buff0), [dst]"+r"(buff1)
: [loop_end]"r"(loop_end)
: "memory"
);
ac->fdsp->vector_fmul_reverse(saved_ltp + 448, ac->buf_mdct + 960, &swindow[64], 64);
fmul_and_reverse(saved_ltp + 512, ac->buf_mdct + 960, swindow, 64);
} else { // LONG_STOP or ONLY_LONG
ac->fdsp->vector_fmul_reverse(saved_ltp, ac->buf_mdct + 512, &lwindow[512], 512);
fmul_and_reverse(saved_ltp + 512, ac->buf_mdct + 512, lwindow, 512);
}
/* Shift the history one frame and append the new data. */
float_copy(sce->ltp_state, sce->ltp_state + 1024, 1024);
float_copy(sce->ltp_state + 1024, sce->output, 1024);
float_copy(sce->ltp_state + 2048, saved_ltp, 1024);
}
#endif /* HAVE_MIPSFPU */
#endif /* HAVE_INLINE_ASM */
/**
 * Install the MIPS inline-assembly implementations into the decoder
 * context. Only active when the toolchain supports inline asm and the
 * target has an FPU; otherwise the generic function pointers remain.
 */
void ff_aacdec_init_mips(AACDecContext *c)
{
#if HAVE_INLINE_ASM
#if HAVE_MIPSFPU
c->dsp.imdct_and_windowing = imdct_and_windowing_mips;
c->dsp.apply_ltp = apply_ltp_mips;
c->update_ltp = update_ltp_mips;
#endif /* HAVE_MIPSFPU */
#endif /* HAVE_INLINE_ASM */
}

View File

@ -1,253 +0,0 @@
/*
* Copyright (c) 2012
* MIPS Technologies, Inc., California.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the MIPS Technologies, Inc., nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE MIPS TECHNOLOGIES, INC. ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE MIPS TECHNOLOGIES, INC. BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* Authors: Darko Laus (darko@mips.com)
* Djordje Pesut (djordje@mips.com)
* Mirjana Vulin (mvulin@mips.com)
*
* AAC Spectral Band Replication decoding functions optimized for MIPS
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* Reference: libavcodec/aacdec.c
*/
#ifndef AVCODEC_MIPS_AACDEC_MIPS_H
#define AVCODEC_MIPS_AACDEC_MIPS_H
#include "libavcodec/aacdec.h"
#include "libavutil/mips/asmdefs.h"
#if HAVE_INLINE_ASM && HAVE_MIPSFPU
#if !HAVE_MIPS32R6 && !HAVE_MIPS64R6
/**
 * Dequantize two codebook values: extracts two 4-bit indices from the
 * low byte of 'idx' (scaled to byte offsets), loads the corresponding
 * table entries via indexed FP loads (lwxc1), multiplies both by
 * *scale, stores them at dst and returns dst + 2.
 * Not available on R6 (lwxc1 was removed there).
 */
static inline float *VMUL2_mips(float *dst, const float *v, unsigned idx,
const float *scale)
{
float temp0, temp1, temp2;
int temp3, temp4;
float *ret;
__asm__ volatile(
"andi %[temp3], %[idx], 0x0F \n\t"
"andi %[temp4], %[idx], 0xF0 \n\t"
"sll %[temp3], %[temp3], 2 \n\t"
"srl %[temp4], %[temp4], 2 \n\t"
"lwc1 %[temp2], 0(%[scale]) \n\t"
"lwxc1 %[temp0], %[temp3](%[v]) \n\t"
"lwxc1 %[temp1], %[temp4](%[v]) \n\t"
"mul.s %[temp0], %[temp0], %[temp2] \n\t"
"mul.s %[temp1], %[temp1], %[temp2] \n\t"
PTR_ADDIU "%[ret], %[dst], 8 \n\t"
"swc1 %[temp0], 0(%[dst]) \n\t"
"swc1 %[temp1], 4(%[dst]) \n\t"
: [temp0]"=&f"(temp0), [temp1]"=&f"(temp1),
[temp2]"=&f"(temp2), [temp3]"=&r"(temp3),
[temp4]"=&r"(temp4), [ret]"=&r"(ret)
: [idx]"r"(idx), [scale]"r"(scale), [v]"r"(v),
[dst]"r"(dst)
: "memory"
);
return ret;
}
/**
 * Dequantize four codebook values: extracts four 2-bit indices from the
 * low byte of 'idx' (scaled to byte offsets — the 0x0C field is already
 * a x4 offset, hence no shift for temp1), loads them with lwxc1,
 * scales by *scale, stores four floats at dst and returns dst + 4.
 */
static inline float *VMUL4_mips(float *dst, const float *v, unsigned idx,
const float *scale)
{
int temp0, temp1, temp2, temp3;
float temp4, temp5, temp6, temp7, temp8;
float *ret;
__asm__ volatile(
"andi %[temp0], %[idx], 0x03 \n\t"
"andi %[temp1], %[idx], 0x0C \n\t"
"andi %[temp2], %[idx], 0x30 \n\t"
"andi %[temp3], %[idx], 0xC0 \n\t"
"sll %[temp0], %[temp0], 2 \n\t"
"srl %[temp2], %[temp2], 2 \n\t"
"srl %[temp3], %[temp3], 4 \n\t"
"lwc1 %[temp4], 0(%[scale]) \n\t"
"lwxc1 %[temp5], %[temp0](%[v]) \n\t"
"lwxc1 %[temp6], %[temp1](%[v]) \n\t"
"lwxc1 %[temp7], %[temp2](%[v]) \n\t"
"lwxc1 %[temp8], %[temp3](%[v]) \n\t"
"mul.s %[temp5], %[temp5], %[temp4] \n\t"
"mul.s %[temp6], %[temp6], %[temp4] \n\t"
"mul.s %[temp7], %[temp7], %[temp4] \n\t"
"mul.s %[temp8], %[temp8], %[temp4] \n\t"
PTR_ADDIU "%[ret], %[dst], 16 \n\t"
"swc1 %[temp5], 0(%[dst]) \n\t"
"swc1 %[temp6], 4(%[dst]) \n\t"
"swc1 %[temp7], 8(%[dst]) \n\t"
"swc1 %[temp8], 12(%[dst]) \n\t"
: [temp0]"=&r"(temp0), [temp1]"=&r"(temp1),
[temp2]"=&r"(temp2), [temp3]"=&r"(temp3),
[temp4]"=&f"(temp4), [temp5]"=&f"(temp5),
[temp6]"=&f"(temp6), [temp7]"=&f"(temp7),
[temp8]"=&f"(temp8), [ret]"=&r"(ret)
: [idx]"r"(idx), [scale]"r"(scale), [v]"r"(v),
[dst]"r"(dst)
: "memory"
);
return ret;
}
/**
 * Signed variant of VMUL2: dequantizes two values and applies per-value
 * sign bits from 'sign' by XORing the sign bit into the raw IEEE-754
 * bits of the scale before the multiply (bit 0 of sign → second value,
 * bit 1 → first). Returns dst + 2.
 */
static inline float *VMUL2S_mips(float *dst, const float *v, unsigned idx,
unsigned sign, const float *scale)
{
int temp0, temp1, temp2, temp3, temp4, temp5;
float temp6, temp7, temp8, temp9;
float *ret;
__asm__ volatile(
"andi %[temp0], %[idx], 0x0F \n\t"
"andi %[temp1], %[idx], 0xF0 \n\t"
"lw %[temp4], 0(%[scale]) \n\t"
"srl %[temp2], %[sign], 1 \n\t"
"sll %[temp3], %[sign], 31 \n\t"
"sll %[temp2], %[temp2], 31 \n\t"
"sll %[temp0], %[temp0], 2 \n\t"
"srl %[temp1], %[temp1], 2 \n\t"
"lwxc1 %[temp8], %[temp0](%[v]) \n\t"
"lwxc1 %[temp9], %[temp1](%[v]) \n\t"
"xor %[temp5], %[temp4], %[temp2] \n\t"
"xor %[temp4], %[temp4], %[temp3] \n\t"
/* mtc1 moves the sign-adjusted scale bits into FP registers */
"mtc1 %[temp5], %[temp6] \n\t"
"mtc1 %[temp4], %[temp7] \n\t"
"mul.s %[temp8], %[temp8], %[temp6] \n\t"
"mul.s %[temp9], %[temp9], %[temp7] \n\t"
PTR_ADDIU "%[ret], %[dst], 8 \n\t"
"swc1 %[temp8], 0(%[dst]) \n\t"
"swc1 %[temp9], 4(%[dst]) \n\t"
: [temp0]"=&r"(temp0), [temp1]"=&r"(temp1),
[temp2]"=&r"(temp2), [temp3]"=&r"(temp3),
[temp4]"=&r"(temp4), [temp5]"=&r"(temp5),
[temp6]"=&f"(temp6), [temp7]"=&f"(temp7),
[temp8]"=&f"(temp8), [temp9]"=&f"(temp9),
[ret]"=&r"(ret)
: [idx]"r"(idx), [scale]"r"(scale), [v]"r"(v),
[dst]"r"(dst), [sign]"r"(sign)
: "memory"
);
return ret;
}
/**
 * Signed variant of VMUL4: dequantizes four values, consuming sign bits
 * from the top of 'sign'. Bits 12-14 of 'idx' indicate which of the
 * remaining values are nonzero (sllv conditionally shifts 'sign' so the
 * next sign bit lines up under the mask only for nonzero entries).
 * Each sign bit is XORed into the scale's IEEE-754 sign before the
 * multiply. Returns dst + 4.
 */
static inline float *VMUL4S_mips(float *dst, const float *v, unsigned idx,
unsigned sign, const float *scale)
{
int temp0, temp1, temp2, temp3, temp4;
float temp10, temp11, temp12, temp13, temp14, temp15, temp16, temp17;
float *ret;
unsigned int mask = 1U << 31;
__asm__ volatile(
"lw %[temp0], 0(%[scale]) \n\t"
"andi %[temp1], %[idx], 0x03 \n\t"
"andi %[temp2], %[idx], 0x0C \n\t"
"andi %[temp3], %[idx], 0x30 \n\t"
"andi %[temp4], %[idx], 0xC0 \n\t"
"sll %[temp1], %[temp1], 2 \n\t"
"srl %[temp3], %[temp3], 2 \n\t"
"srl %[temp4], %[temp4], 4 \n\t"
"lwxc1 %[temp10], %[temp1](%[v]) \n\t"
"lwxc1 %[temp11], %[temp2](%[v]) \n\t"
"lwxc1 %[temp12], %[temp3](%[v]) \n\t"
"lwxc1 %[temp13], %[temp4](%[v]) \n\t"
"and %[temp1], %[sign], %[mask] \n\t"
"srl %[temp2], %[idx], 12 \n\t"
"srl %[temp3], %[idx], 13 \n\t"
"srl %[temp4], %[idx], 14 \n\t"
"andi %[temp2], %[temp2], 1 \n\t"
"andi %[temp3], %[temp3], 1 \n\t"
"andi %[temp4], %[temp4], 1 \n\t"
"sllv %[sign], %[sign], %[temp2] \n\t"
"xor %[temp1], %[temp0], %[temp1] \n\t"
"and %[temp2], %[sign], %[mask] \n\t"
"mtc1 %[temp1], %[temp14] \n\t"
"xor %[temp2], %[temp0], %[temp2] \n\t"
"sllv %[sign], %[sign], %[temp3] \n\t"
"mtc1 %[temp2], %[temp15] \n\t"
"and %[temp3], %[sign], %[mask] \n\t"
"sllv %[sign], %[sign], %[temp4] \n\t"
"xor %[temp3], %[temp0], %[temp3] \n\t"
"and %[temp4], %[sign], %[mask] \n\t"
"mtc1 %[temp3], %[temp16] \n\t"
"xor %[temp4], %[temp0], %[temp4] \n\t"
"mtc1 %[temp4], %[temp17] \n\t"
"mul.s %[temp10], %[temp10], %[temp14] \n\t"
"mul.s %[temp11], %[temp11], %[temp15] \n\t"
"mul.s %[temp12], %[temp12], %[temp16] \n\t"
"mul.s %[temp13], %[temp13], %[temp17] \n\t"
PTR_ADDIU "%[ret], %[dst], 16 \n\t"
"swc1 %[temp10], 0(%[dst]) \n\t"
"swc1 %[temp11], 4(%[dst]) \n\t"
"swc1 %[temp12], 8(%[dst]) \n\t"
"swc1 %[temp13], 12(%[dst]) \n\t"
: [temp0]"=&r"(temp0), [temp1]"=&r"(temp1),
[temp2]"=&r"(temp2), [temp3]"=&r"(temp3),
[temp4]"=&r"(temp4), [temp10]"=&f"(temp10),
[temp11]"=&f"(temp11), [temp12]"=&f"(temp12),
[temp13]"=&f"(temp13), [temp14]"=&f"(temp14),
[temp15]"=&f"(temp15), [temp16]"=&f"(temp16),
[temp17]"=&f"(temp17), [ret]"=&r"(ret),
[sign]"+r"(sign)
: [idx]"r"(idx), [scale]"r"(scale), [v]"r"(v),
[dst]"r"(dst), [mask]"r"(mask)
: "memory"
);
return ret;
}
#define VMUL2 VMUL2_mips
#define VMUL4 VMUL4_mips
#define VMUL2S VMUL2S_mips
#define VMUL4S VMUL4S_mips
#endif /* !HAVE_MIPS32R6 && !HAVE_MIPS64R6 */
#endif /* HAVE_INLINE_ASM && HAVE_MIPSFPU */
#endif /* AVCODEC_MIPS_AACDEC_MIPS_H */

View File

@ -1,465 +0,0 @@
/*
* Copyright (c) 2012
* MIPS Technologies, Inc., California.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the MIPS Technologies, Inc., nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE MIPS TECHNOLOGIES, INC. ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE MIPS TECHNOLOGIES, INC. BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* Authors: Darko Laus (darko@mips.com)
* Djordje Pesut (djordje@mips.com)
* Mirjana Vulin (mvulin@mips.com)
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* Reference: libavcodec/aacpsdsp.c
*/
#include "config.h"
#include "libavcodec/aacpsdsp.h"
#include "libavutil/mips/asmdefs.h"
#if HAVE_INLINE_ASM
#if HAVE_MIPSFPU
/**
 * Interleave hybrid-analysis output: gathers real/imag planes of L into
 * the interleaved out[i][j][0..1] layout, starting at channel i.
 * The asm byte offsets encode the L strides: 9728 = 38*64*4 bytes
 * (distance from L[0] to L[1]), 256 = 64*4 bytes (one row of 64
 * floats), 1024 = four rows per unrolled iteration.
 * Words are moved through integer registers (lw/sw), untouched.
 */
static void ps_hybrid_analysis_ileave_mips(float (*out)[32][2], float L[2][38][64],
int i, int len)
{
int temp0, temp1, temp2, temp3;
int temp4, temp5, temp6, temp7;
float *out1=&out[i][0][0];
float *L1=&L[0][0][i];
float *j=out1+ len*2;
for (; i < 64; i++) {
/* loop unrolled 8 times */
__asm__ volatile (
"1: \n\t"
"lw %[temp0], 0(%[L1]) \n\t"
"lw %[temp1], 9728(%[L1]) \n\t"
"lw %[temp2], 256(%[L1]) \n\t"
"lw %[temp3], 9984(%[L1]) \n\t"
"lw %[temp4], 512(%[L1]) \n\t"
"lw %[temp5], 10240(%[L1]) \n\t"
"lw %[temp6], 768(%[L1]) \n\t"
"lw %[temp7], 10496(%[L1]) \n\t"
"sw %[temp0], 0(%[out1]) \n\t"
"sw %[temp1], 4(%[out1]) \n\t"
"sw %[temp2], 8(%[out1]) \n\t"
"sw %[temp3], 12(%[out1]) \n\t"
"sw %[temp4], 16(%[out1]) \n\t"
"sw %[temp5], 20(%[out1]) \n\t"
"sw %[temp6], 24(%[out1]) \n\t"
"sw %[temp7], 28(%[out1]) \n\t"
PTR_ADDIU "%[out1], %[out1], 32 \n\t"
PTR_ADDIU "%[L1], %[L1], 1024 \n\t"
"bne %[out1], %[j], 1b \n\t"
: [out1]"+r"(out1), [L1]"+r"(L1), [j]"+r"(j),
[temp0]"=&r"(temp0), [temp1]"=&r"(temp1),
[temp2]"=&r"(temp2), [temp3]"=&r"(temp3),
[temp4]"=&r"(temp4), [temp5]"=&r"(temp5),
[temp6]"=&r"(temp6), [temp7]"=&r"(temp7)
: [len]"r"(len)
: "memory"
);
/* Rewind to the start of the next channel's column. */
out1-=(len<<1)-64;
L1-=(len<<6)-1;
j+=len*2;
}
}
/**
 * De-interleave hybrid-synthesis input: scatters in[n][j][0..1] pairs
 * back into the separate real (out[0]) and imag (out[1]) planes,
 * starting at channel i. 2432 floats = 38*64 is the offset from
 * out[0] to out[1]; asm byte offsets of 256/1024 are the 64-float row
 * stride and four rows respectively. The main loop handles rows in
 * groups of 4 (7 unrolled passes = 28 rows), and the trailing asm
 * block finishes rows 28..31 while rewinding the out pointers
 * (-7164 bytes) for the next channel.
 */
static void ps_hybrid_synthesis_deint_mips(float out[2][38][64],
float (*in)[32][2],
int i, int len)
{
int n;
int temp0, temp1, temp2, temp3, temp4, temp5, temp6, temp7;
float *out1 = (float*)out + i;
float *out2 = (float*)out + 2432 + i;
float *in1 = (float*)in + 64 * i;
float *in2 = (float*)in + 64 * i + 1;
for (; i < 64; i++) {
for (n = 0; n < 7; n++) {
/* loop unrolled 8 times */
__asm__ volatile (
"lw %[temp0], 0(%[in1]) \n\t"
"lw %[temp1], 0(%[in2]) \n\t"
"lw %[temp2], 8(%[in1]) \n\t"
"lw %[temp3], 8(%[in2]) \n\t"
"lw %[temp4], 16(%[in1]) \n\t"
"lw %[temp5], 16(%[in2]) \n\t"
"lw %[temp6], 24(%[in1]) \n\t"
"lw %[temp7], 24(%[in2]) \n\t"
PTR_ADDIU "%[out1], %[out1], 1024 \n\t"
PTR_ADDIU "%[out2], %[out2], 1024 \n\t"
PTR_ADDIU "%[in1], %[in1], 32 \n\t"
PTR_ADDIU "%[in2], %[in2], 32 \n\t"
"sw %[temp0], -1024(%[out1]) \n\t"
"sw %[temp1], -1024(%[out2]) \n\t"
"sw %[temp2], -768(%[out1]) \n\t"
"sw %[temp3], -768(%[out2]) \n\t"
"sw %[temp4], -512(%[out1]) \n\t"
"sw %[temp5], -512(%[out2]) \n\t"
"sw %[temp6], -256(%[out1]) \n\t"
"sw %[temp7], -256(%[out2]) \n\t"
: [temp0]"=&r"(temp0), [temp1]"=&r"(temp1),
[temp2]"=&r"(temp2), [temp3]"=&r"(temp3),
[temp4]"=&r"(temp4), [temp5]"=&r"(temp5),
[temp6]"=&r"(temp6), [temp7]"=&r"(temp7),
[out1]"+r"(out1), [out2]"+r"(out2),
[in1]"+r"(in1), [in2]"+r"(in2)
:
: "memory"
);
}
/* loop unrolled 8 times */
__asm__ volatile (
"lw %[temp0], 0(%[in1]) \n\t"
"lw %[temp1], 0(%[in2]) \n\t"
"lw %[temp2], 8(%[in1]) \n\t"
"lw %[temp3], 8(%[in2]) \n\t"
"lw %[temp4], 16(%[in1]) \n\t"
"lw %[temp5], 16(%[in2]) \n\t"
"lw %[temp6], 24(%[in1]) \n\t"
"lw %[temp7], 24(%[in2]) \n\t"
PTR_ADDIU "%[out1], %[out1], -7164 \n\t"
PTR_ADDIU "%[out2], %[out2], -7164 \n\t"
PTR_ADDIU "%[in1], %[in1], 32 \n\t"
PTR_ADDIU "%[in2], %[in2], 32 \n\t"
"sw %[temp0], 7164(%[out1]) \n\t"
"sw %[temp1], 7164(%[out2]) \n\t"
"sw %[temp2], 7420(%[out1]) \n\t"
"sw %[temp3], 7420(%[out2]) \n\t"
"sw %[temp4], 7676(%[out1]) \n\t"
"sw %[temp5], 7676(%[out2]) \n\t"
"sw %[temp6], 7932(%[out1]) \n\t"
"sw %[temp7], 7932(%[out2]) \n\t"
: [temp0]"=&r"(temp0), [temp1]"=&r"(temp1),
[temp2]"=&r"(temp2), [temp3]"=&r"(temp3),
[temp4]"=&r"(temp4), [temp5]"=&r"(temp5),
[temp6]"=&r"(temp6), [temp7]"=&r"(temp7),
[out1]"+r"(out1), [out2]"+r"(out2),
[in1]"+r"(in1), [in2]"+r"(in2)
:
: "memory"
);
}
}
#if !HAVE_MIPS32R6 && !HAVE_MIPS64R6
/**
 * Accumulate squared magnitudes of complex samples:
 * dst[i] += re(src[i])^2 + im(src[i])^2.
 *
 * NOTE(review): the 'n' parameter is ignored; the loop always processes
 * exactly 32 complex samples (8 iterations, 4-way unrolled asm body) —
 * assumes callers never pass n != 32, TODO confirm against PSDSPContext users.
 */
static void ps_add_squares_mips(float *dst, const float (*src)[2], int n)
{
    int i;
    float temp0, temp1, temp2, temp3, temp4, temp5;
    float temp6, temp7, temp8, temp9, temp10, temp11;
    float *src0 = (float*)&src[0][0];
    float *dst0 = &dst[0];
    for (i = 0; i < 8; i++) {
        /* loop unrolled 4 times */
        /* temp0/2/4/6 = re, temp1/3/5/7 = im; madd.s computes re^2 + im^2,
         * then the running dst value (temp8..temp11) is added back in. */
        __asm__ volatile (
            "lwc1     %[temp0],    0(%[src0])                       \n\t"
            "lwc1     %[temp1],    4(%[src0])                       \n\t"
            "lwc1     %[temp2],    8(%[src0])                       \n\t"
            "lwc1     %[temp3],    12(%[src0])                      \n\t"
            "lwc1     %[temp4],    16(%[src0])                      \n\t"
            "lwc1     %[temp5],    20(%[src0])                      \n\t"
            "lwc1     %[temp6],    24(%[src0])                      \n\t"
            "lwc1     %[temp7],    28(%[src0])                      \n\t"
            "lwc1     %[temp8],    0(%[dst0])                       \n\t"
            "lwc1     %[temp9],    4(%[dst0])                       \n\t"
            "lwc1     %[temp10],   8(%[dst0])                       \n\t"
            "lwc1     %[temp11],   12(%[dst0])                      \n\t"
            "mul.s    %[temp1],    %[temp1],    %[temp1]            \n\t"
            "mul.s    %[temp3],    %[temp3],    %[temp3]            \n\t"
            "mul.s    %[temp5],    %[temp5],    %[temp5]            \n\t"
            "mul.s    %[temp7],    %[temp7],    %[temp7]            \n\t"
            "madd.s   %[temp0],    %[temp1],    %[temp0], %[temp0]  \n\t"
            "madd.s   %[temp2],    %[temp3],    %[temp2], %[temp2]  \n\t"
            "madd.s   %[temp4],    %[temp5],    %[temp4], %[temp4]  \n\t"
            "madd.s   %[temp6],    %[temp7],    %[temp6], %[temp6]  \n\t"
            "add.s    %[temp0],    %[temp8],    %[temp0]            \n\t"
            "add.s    %[temp2],    %[temp9],    %[temp2]            \n\t"
            "add.s    %[temp4],    %[temp10],   %[temp4]            \n\t"
            "add.s    %[temp6],    %[temp11],   %[temp6]            \n\t"
            "swc1     %[temp0],    0(%[dst0])                       \n\t"
            "swc1     %[temp2],    4(%[dst0])                       \n\t"
            "swc1     %[temp4],    8(%[dst0])                       \n\t"
            "swc1     %[temp6],    12(%[dst0])                      \n\t"
            PTR_ADDIU "%[dst0],    %[dst0],     16                  \n\t"
            PTR_ADDIU "%[src0],    %[src0],     32                  \n\t"
            : [temp0]"=&f"(temp0), [temp1]"=&f"(temp1), [temp2]"=&f"(temp2),
              [temp3]"=&f"(temp3), [temp4]"=&f"(temp4), [temp5]"=&f"(temp5),
              [temp6]"=&f"(temp6), [temp7]"=&f"(temp7), [temp8]"=&f"(temp8),
              [temp9]"=&f"(temp9), [dst0]"+r"(dst0), [src0]"+r"(src0),
              [temp10]"=&f"(temp10), [temp11]"=&f"(temp11)
            :
            : "memory"
        );
    }
}
/**
 * Multiply n complex samples by n real scalars:
 * dst[i] = src0[i] * src1[i] (both real and imaginary parts scaled).
 *
 * NOTE(review): the asm is a do/while-shaped loop — the body executes once
 * before the end-pointer test, so n must be >= 1; TODO confirm callers
 * never pass n == 0.
 */
static void ps_mul_pair_single_mips(float (*dst)[2], float (*src0)[2], float *src1,
                                    int n)
{
    float temp0, temp1, temp2;
    float *p_d, *p_s0, *p_s1, *end;
    p_d = &dst[0][0];
    p_s0 = &src0[0][0];
    p_s1 = &src1[0];
    end = p_s1 + n;
    /* The p_s1 increment sits in the branch delay slot (.set noreorder). */
    __asm__ volatile(
        ".set    push                                   \n\t"
        ".set    noreorder                              \n\t"
        "1:                                             \n\t"
        "lwc1    %[temp2],   0(%[p_s1])                 \n\t"
        "lwc1    %[temp0],   0(%[p_s0])                 \n\t"
        "lwc1    %[temp1],   4(%[p_s0])                 \n\t"
        PTR_ADDIU "%[p_d],   %[p_d],       8            \n\t"
        "mul.s   %[temp0],   %[temp0],     %[temp2]     \n\t"
        "mul.s   %[temp1],   %[temp1],     %[temp2]     \n\t"
        PTR_ADDIU "%[p_s0],  %[p_s0],      8            \n\t"
        "swc1    %[temp0],   -8(%[p_d])                 \n\t"
        "swc1    %[temp1],   -4(%[p_d])                 \n\t"
        "bne     %[p_s1],    %[end],       1b           \n\t"
        PTR_ADDIU "%[p_s1],  %[p_s1],      4            \n\t"
        ".set    pop                                    \n\t"
        : [temp0]"=&f"(temp0), [temp1]"=&f"(temp1),
          [temp2]"=&f"(temp2), [p_d]"+r"(p_d),
          [p_s0]"+r"(p_s0), [p_s1]"+r"(p_s1)
        : [end]"r"(end)
        : "memory"
    );
}
/**
 * Parametric-stereo decorrelation over 'len' complex QMF samples:
 * rotates the delayed signal by phi_fract, feeds it through three serial
 * all-pass sections (link gains f1/f2/f3 scaled by g_decay_slope, fraction
 * rotations from Q_fract, state kept in ap_delay at byte offsets
 * 40/336/624 past the running pointer), then scales the result by
 * transient_gain[] into out.  Presumably mirrors the generic C
 * decorrelate in libavcodec/aacps.c — confirm against that file.
 *
 * NOTE(review): ag0/ag1/ag2 are bound as *input* operands ("f"(f1) etc.)
 * yet the first three mul.s instructions write them.  GCC extended-asm
 * rules forbid modifying input operands, so this is formally undefined
 * behavior and fragile across compiler versions.
 * NOTE(review): temp6 is stored to -4(%[p_out]) twice — once before the
 * branch and again in its delay slot.  Redundant but harmless.
 * NOTE(review): do/while-shaped loop — requires len >= 1.
 */
static void ps_decorrelate_mips(float (*out)[2], float (*delay)[2],
                             float (*ap_delay)[PS_QMF_TIME_SLOTS + PS_MAX_AP_DELAY][2],
                             const float phi_fract[2], const float (*Q_fract)[2],
                             const float *transient_gain,
                             float g_decay_slope,
                             int len)
{
    float *p_delay = &delay[0][0];
    float *p_out = &out[0][0];
    float *p_ap_delay = &ap_delay[0][0][0];
    const float *p_t_gain = transient_gain;
    const float *p_Q_fract = &Q_fract[0][0];
    float ag0, ag1, ag2;
    float phi_fract0 = phi_fract[0];
    float phi_fract1 = phi_fract[1];
    float temp0, temp1, temp2, temp3, temp4, temp5, temp6, temp7, temp8, temp9;
    float f1, f2, f3;
    float *p_delay_end = (p_delay + (len << 1));
    /* merged 2 loops */
    /* Fixed all-pass link gains; multiplied by g_decay_slope inside the asm. */
    f1 = 0.65143905753106;
    f2 = 0.56471812200776;
    f3 = 0.48954165955695;
    __asm__ volatile(
        ".set push                                                     \n\t"
        ".set noreorder                                                \n\t"
        "mul.s   %[ag0],     %[ag0],     %[g_decay_slope]              \n\t"
        "mul.s   %[ag1],     %[ag1],     %[g_decay_slope]              \n\t"
        "mul.s   %[ag2],     %[ag2],     %[g_decay_slope]              \n\t"
        "1:                                                            \n\t"
        "lwc1    %[temp0],   0(%[p_delay])                             \n\t"
        "lwc1    %[temp1],   4(%[p_delay])                             \n\t"
        "lwc1    %[temp4],   16(%[p_ap_delay])                         \n\t"
        "lwc1    %[temp5],   20(%[p_ap_delay])                         \n\t"
        "mul.s   %[temp3],   %[temp0],     %[phi_fract1]               \n\t"
        "lwc1    %[temp6],   0(%[p_Q_fract])                           \n\t"
        "mul.s   %[temp2],   %[temp1],     %[phi_fract1]               \n\t"
        "lwc1    %[temp7],   4(%[p_Q_fract])                           \n\t"
        "madd.s  %[temp3],   %[temp3],     %[temp1], %[phi_fract0]     \n\t"
        "msub.s  %[temp2],   %[temp2],     %[temp0], %[phi_fract0]     \n\t"
        "mul.s   %[temp8],   %[temp5],     %[temp7]                    \n\t"
        "mul.s   %[temp9],   %[temp4],     %[temp7]                    \n\t"
        "lwc1    %[temp7],   12(%[p_Q_fract])                          \n\t"
        "mul.s   %[temp0],   %[ag0],       %[temp2]                    \n\t"
        "mul.s   %[temp1],   %[ag0],       %[temp3]                    \n\t"
        "msub.s  %[temp8],   %[temp8],     %[temp4], %[temp6]          \n\t"
        "lwc1    %[temp4],   304(%[p_ap_delay])                        \n\t"
        "madd.s  %[temp9],   %[temp9],     %[temp5], %[temp6]          \n\t"
        "lwc1    %[temp5],   308(%[p_ap_delay])                        \n\t"
        "sub.s   %[temp0],   %[temp8],     %[temp0]                    \n\t"
        "sub.s   %[temp1],   %[temp9],     %[temp1]                    \n\t"
        "madd.s  %[temp2],   %[temp2],     %[ag0],   %[temp0]          \n\t"
        "lwc1    %[temp6],   8(%[p_Q_fract])                           \n\t"
        "madd.s  %[temp3],   %[temp3],     %[ag0],   %[temp1]          \n\t"
        "mul.s   %[temp8],   %[temp5],     %[temp7]                    \n\t"
        "mul.s   %[temp9],   %[temp4],     %[temp7]                    \n\t"
        "lwc1    %[temp7],   20(%[p_Q_fract])                          \n\t"
        "msub.s  %[temp8],   %[temp8],     %[temp4], %[temp6]          \n\t"
        "swc1    %[temp2],   40(%[p_ap_delay])                         \n\t"
        "mul.s   %[temp2],   %[ag1],       %[temp0]                    \n\t"
        "swc1    %[temp3],   44(%[p_ap_delay])                         \n\t"
        "mul.s   %[temp3],   %[ag1],       %[temp1]                    \n\t"
        "lwc1    %[temp4],   592(%[p_ap_delay])                        \n\t"
        "madd.s  %[temp9],   %[temp9],     %[temp5], %[temp6]          \n\t"
        "lwc1    %[temp5],   596(%[p_ap_delay])                        \n\t"
        "sub.s   %[temp2],   %[temp8],     %[temp2]                    \n\t"
        "sub.s   %[temp3],   %[temp9],     %[temp3]                    \n\t"
        "lwc1    %[temp6],   16(%[p_Q_fract])                          \n\t"
        "madd.s  %[temp0],   %[temp0],     %[ag1],   %[temp2]          \n\t"
        "madd.s  %[temp1],   %[temp1],     %[ag1],   %[temp3]          \n\t"
        "mul.s   %[temp8],   %[temp5],     %[temp7]                    \n\t"
        "mul.s   %[temp9],   %[temp4],     %[temp7]                    \n\t"
        "msub.s  %[temp8],   %[temp8],     %[temp4], %[temp6]          \n\t"
        "madd.s  %[temp9],   %[temp9],     %[temp5], %[temp6]          \n\t"
        "swc1    %[temp0],   336(%[p_ap_delay])                        \n\t"
        "mul.s   %[temp0],   %[ag2],       %[temp2]                    \n\t"
        "swc1    %[temp1],   340(%[p_ap_delay])                        \n\t"
        "mul.s   %[temp1],   %[ag2],       %[temp3]                    \n\t"
        "lwc1    %[temp4],   0(%[p_t_gain])                            \n\t"
        "sub.s   %[temp0],   %[temp8],     %[temp0]                    \n\t"
        PTR_ADDIU "%[p_ap_delay], %[p_ap_delay],  8                    \n\t"
        "sub.s   %[temp1],   %[temp9],     %[temp1]                    \n\t"
        PTR_ADDIU "%[p_t_gain],   %[p_t_gain],    4                    \n\t"
        "madd.s  %[temp2],   %[temp2],     %[ag2],   %[temp0]          \n\t"
        PTR_ADDIU "%[p_delay],    %[p_delay],     8                    \n\t"
        "madd.s  %[temp3],   %[temp3],     %[ag2],   %[temp1]          \n\t"
        PTR_ADDIU "%[p_out],      %[p_out],       8                    \n\t"
        "mul.s   %[temp5],   %[temp4],     %[temp0]                    \n\t"
        "mul.s   %[temp6],   %[temp4],     %[temp1]                    \n\t"
        "swc1    %[temp2],   624(%[p_ap_delay])                        \n\t"
        "swc1    %[temp3],   628(%[p_ap_delay])                        \n\t"
        "swc1    %[temp5],   -8(%[p_out])                              \n\t"
        "swc1    %[temp6],   -4(%[p_out])                              \n\t"
        "bne     %[p_delay], %[p_delay_end],1b                         \n\t"
        " swc1   %[temp6],   -4(%[p_out])                              \n\t"
        ".set pop                                                      \n\t"
        : [temp0]"=&f"(temp0), [temp1]"=&f"(temp1), [temp2]"=&f"(temp2),
          [temp3]"=&f"(temp3), [temp4]"=&f"(temp4), [temp5]"=&f"(temp5),
          [temp6]"=&f"(temp6), [temp7]"=&f"(temp7), [temp8]"=&f"(temp8),
          [temp9]"=&f"(temp9), [p_delay]"+r"(p_delay), [p_ap_delay]"+r"(p_ap_delay),
          [p_Q_fract]"+r"(p_Q_fract), [p_t_gain]"+r"(p_t_gain), [p_out]"+r"(p_out)
        : [phi_fract0]"f"(phi_fract0), [phi_fract1]"f"(phi_fract1),
          [p_delay_end]"r"(p_delay_end), [g_decay_slope]"f"(g_decay_slope),
          [ag0]"f"(f1), [ag1]"f"(f2), [ag2]"f"(f3)
        : "memory"
    );
}
/**
 * Stereo reconstruction with a linearly interpolated 2x2 mixing matrix:
 * for each of 'len' complex samples, first advance h[0..3] by
 * h_step[0..3], then compute (in place)
 *   l' = h0*l + h2*r,   r' = h1*l + h3*r
 * applied separately to the real and imaginary parts.
 *
 * NOTE(review): do/while-shaped asm loop (store in the branch delay
 * slot), so len must be >= 1.
 */
static void ps_stereo_interpolate_mips(float (*l)[2], float (*r)[2],
                                    float h[2][4], float h_step[2][4],
                                    int len)
{
    float h0 = h[0][0];
    float h1 = h[0][1];
    float h2 = h[0][2];
    float h3 = h[0][3];
    float hs0 = h_step[0][0];
    float hs1 = h_step[0][1];
    float hs2 = h_step[0][2];
    float hs3 = h_step[0][3];
    float temp0, temp1, temp2, temp3;
    float l_re, l_im, r_re, r_im;
    float *l_end = ((float *)l + (len << 1));
    __asm__ volatile(
        ".set    push                                     \n\t"
        ".set    noreorder                                \n\t"
        "1:                                               \n\t"
        "add.s   %[h0],     %[h0],     %[hs0]             \n\t"
        "lwc1    %[l_re],   0(%[l])                       \n\t"
        "add.s   %[h1],     %[h1],     %[hs1]             \n\t"
        "lwc1    %[r_re],   0(%[r])                       \n\t"
        "add.s   %[h2],     %[h2],     %[hs2]             \n\t"
        "lwc1    %[l_im],   4(%[l])                       \n\t"
        "add.s   %[h3],     %[h3],     %[hs3]             \n\t"
        "lwc1    %[r_im],   4(%[r])                       \n\t"
        "mul.s   %[temp0],  %[h0],     %[l_re]            \n\t"
        PTR_ADDIU "%[l],    %[l],      8                  \n\t"
        "mul.s   %[temp2],  %[h1],     %[l_re]            \n\t"
        PTR_ADDIU "%[r],    %[r],      8                  \n\t"
        "madd.s  %[temp0],  %[temp0],  %[h2],   %[r_re]   \n\t"
        "madd.s  %[temp2],  %[temp2],  %[h3],   %[r_re]   \n\t"
        "mul.s   %[temp1],  %[h0],     %[l_im]            \n\t"
        "mul.s   %[temp3],  %[h1],     %[l_im]            \n\t"
        "madd.s  %[temp1],  %[temp1],  %[h2],   %[r_im]   \n\t"
        "madd.s  %[temp3],  %[temp3],  %[h3],   %[r_im]   \n\t"
        "swc1    %[temp0],  -8(%[l])                      \n\t"
        "swc1    %[temp2],  -8(%[r])                      \n\t"
        "swc1    %[temp1],  -4(%[l])                      \n\t"
        "bne     %[l],      %[l_end],  1b                 \n\t"
        " swc1   %[temp3],  -4(%[r])                      \n\t"
        ".set    pop                                      \n\t"
        : [temp0]"=&f"(temp0), [temp1]"=&f"(temp1),
          [temp2]"=&f"(temp2), [temp3]"=&f"(temp3),
          [h0]"+f"(h0), [h1]"+f"(h1), [h2]"+f"(h2),
          [h3]"+f"(h3), [l]"+r"(l), [r]"+r"(r),
          [l_re]"=&f"(l_re), [l_im]"=&f"(l_im),
          [r_re]"=&f"(r_re), [r_im]"=&f"(r_im)
        : [hs0]"f"(hs0), [hs1]"f"(hs1), [hs2]"f"(hs2),
          [hs3]"f"(hs3), [l_end]"r"(l_end)
        : "memory"
    );
}
#endif /* !HAVE_MIPS32R6 && !HAVE_MIPS64R6 */
#endif /* HAVE_MIPSFPU */
#endif /* HAVE_INLINE_ASM */
/**
 * Install the MIPS-optimized PSDSP function pointers into *s.
 * All implementations require inline asm and an FPU; the four FP-math
 * routines are additionally compiled out on MIPS release 6
 * (presumably because they rely on madd.s/msub.s, which R6 removed —
 * confirm against the MIPS32 R6 ISA manual).
 */
void ff_psdsp_init_mips(PSDSPContext *s)
{
#if HAVE_INLINE_ASM
#if HAVE_MIPSFPU
    s->hybrid_analysis_ileave = ps_hybrid_analysis_ileave_mips;
    s->hybrid_synthesis_deint = ps_hybrid_synthesis_deint_mips;
#if !HAVE_MIPS32R6 && !HAVE_MIPS64R6
    s->add_squares            = ps_add_squares_mips;
    s->mul_pair_single        = ps_mul_pair_single_mips;
    s->decorrelate            = ps_decorrelate_mips;
    s->stereo_interpolate[0]  = ps_stereo_interpolate_mips;
#endif /* !HAVE_MIPS32R6 && !HAVE_MIPS64R6 */
#endif /* HAVE_MIPSFPU */
#endif /* HAVE_INLINE_ASM */
}

View File

@ -1,238 +0,0 @@
/*
* Copyright (c) 2012
* MIPS Technologies, Inc., California.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the MIPS Technologies, Inc., nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE MIPS TECHNOLOGIES, INC. ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE MIPS TECHNOLOGIES, INC. BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* Author: Bojan Zivkovic (bojan@mips.com)
*
* AAC encoder psychoacoustic model routines optimized
* for MIPS floating-point architecture
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* Reference: libavcodec/aacpsy.c
*/
#ifndef AVCODEC_MIPS_AACPSY_MIPS_H
#define AVCODEC_MIPS_AACPSY_MIPS_H
#include "libavutil/mips/asmdefs.h"
#if HAVE_INLINE_ASM && HAVE_MIPSFPU && ( PSY_LAME_FIR_LEN == 21 )
#if !HAVE_MIPS32R6 && !HAVE_MIPS64R6
/**
 * MIPS version of calc_thr_3gpp(): per window group and scalefactor band
 * below the cutoff, accumulate
 *   band->energy = sum(|coef|^2)  and  form_factor = sum(sqrt(|coef|))
 * four coefficients at a time, then derive the threshold and the
 * estimated number of non-zero lines.
 *
 * NOTE(review): the unrolled asm assumes band_sizes[g] is a multiple of
 * 4 — TODO confirm this holds for all supported band tables.
 * NOTE(review): for bands at/above the cutoff band->energy stays 0.0f,
 * so Temp = sqrtf(size / 0) = inf and nz_lines = 0 * inf = NaN.  Verify
 * whether the generic C version guards energy == 0.
 */
static void calc_thr_3gpp_mips(const FFPsyWindowInfo *wi, const int num_bands,
                               AacPsyChannel *pch, const uint8_t *band_sizes,
                               const float *coefs, const int cutoff)
{
    int i, w, g;
    int start = 0, wstart = 0;
    for (w = 0; w < wi->num_windows*16; w += 16) {
        wstart = 0;
        for (g = 0; g < num_bands; g++) {
            AacPsyBand *band = &pch->band[w+g];
            float form_factor = 0.0f;
            float Temp;
            band->energy = 0.0f;
            if (wstart < cutoff) {
                for (i = 0; i < band_sizes[g]; i+=4) {
                    float a, b, c, d;
                    float ax, bx, cx, dx;
                    float *cf = (float *)&coefs[start+i];
                    /* e accumulates |coef|^2 (energy), f accumulates
                     * sqrt(|coef|) (form factor), 4 coefficients per pass. */
                    __asm__ volatile (
                        "lwc1   %[a],   0(%[cf])                \n\t"
                        "lwc1   %[b],   4(%[cf])                \n\t"
                        "lwc1   %[c],   8(%[cf])                \n\t"
                        "lwc1   %[d],   12(%[cf])               \n\t"
                        "abs.s  %[a],   %[a]                    \n\t"
                        "abs.s  %[b],   %[b]                    \n\t"
                        "abs.s  %[c],   %[c]                    \n\t"
                        "abs.s  %[d],   %[d]                    \n\t"
                        "sqrt.s %[ax],  %[a]                    \n\t"
                        "sqrt.s %[bx],  %[b]                    \n\t"
                        "sqrt.s %[cx],  %[c]                    \n\t"
                        "sqrt.s %[dx],  %[d]                    \n\t"
                        "madd.s %[e],   %[e],   %[a],   %[a]    \n\t"
                        "madd.s %[e],   %[e],   %[b],   %[b]    \n\t"
                        "madd.s %[e],   %[e],   %[c],   %[c]    \n\t"
                        "madd.s %[e],   %[e],   %[d],   %[d]    \n\t"
                        "add.s  %[f],   %[f],   %[ax]           \n\t"
                        "add.s  %[f],   %[f],   %[bx]           \n\t"
                        "add.s  %[f],   %[f],   %[cx]           \n\t"
                        "add.s  %[f],   %[f],   %[dx]           \n\t"
                        : [a]"=&f"(a), [b]"=&f"(b),
                          [c]"=&f"(c), [d]"=&f"(d),
                          [e]"+f"(band->energy), [f]"+f"(form_factor),
                          [ax]"=&f"(ax), [bx]"=&f"(bx),
                          [cx]"=&f"(cx), [dx]"=&f"(dx)
                        : [cf]"r"(cf)
                        : "memory"
                    );
                }
            }
            Temp = sqrtf((float)band_sizes[g] / band->energy);
            /* 0.001258925f ~= 10^(-29/10) — presumably a fixed ~29 dB
             * SNR offset; confirm against the generic model. */
            band->thr      = band->energy * 0.001258925f;
            band->nz_lines = form_factor * sqrtf(Temp);
            start += band_sizes[g];
            wstart += band_sizes[g];
        }
    }
}
/**
 * MIPS version of the LAME-style psymodel high-pass FIR filter.
 * Produces AAC_BLOCK_SIZE_LONG output samples, four per asm iteration,
 * using only the odd-indexed filter coefficients
 * psy_fir_coeffs[1,3,5,7,9] (the surrounding guard requires
 * PSY_LAME_FIR_LEN == 21).  Each result is scaled by 32768.0 (f1).
 *
 * NOTE(review): reads firbuf at byte offsets up to 92 beyond the loop
 * pointer, so the caller must provide the usual FIR history padding —
 * TODO confirm buffer sizing at the call site.
 */
static void psy_hp_filter_mips(const float *firbuf, float *hpfsmpl, const float * psy_fir_coeffs)
{
    float sum1, sum2, sum3, sum4;
    float *fb = (float*)firbuf;
    float *fb_end = fb + AAC_BLOCK_SIZE_LONG;
    float *hp = hpfsmpl;
    float coeff0 = psy_fir_coeffs[1];
    float coeff1 = psy_fir_coeffs[3];
    float coeff2 = psy_fir_coeffs[5];
    float coeff3 = psy_fir_coeffs[7];
    float coeff4 = psy_fir_coeffs[9];
    float f1 = 32768.0;
    /* fb advances 16 bytes (4 floats) per iteration; hp advances in the
     * branch delay slot (.set noreorder). */
    __asm__ volatile (
        ".set   push                                            \n\t"
        ".set   noreorder                                       \n\t"
        "1:                                                     \n\t"
        "lwc1   $f0,       40(%[fb])                            \n\t"
        "lwc1   $f1,       4(%[fb])                             \n\t"
        "lwc1   $f2,       80(%[fb])                            \n\t"
        "lwc1   $f3,       44(%[fb])                            \n\t"
        "lwc1   $f4,       8(%[fb])                             \n\t"
        "madd.s %[sum1],   $f0,       $f1,       %[coeff0]      \n\t"
        "lwc1   $f5,       84(%[fb])                            \n\t"
        "lwc1   $f6,       48(%[fb])                            \n\t"
        "madd.s %[sum2],   $f3,       $f4,       %[coeff0]      \n\t"
        "lwc1   $f7,       12(%[fb])                            \n\t"
        "madd.s %[sum1],   %[sum1],   $f2,       %[coeff0]      \n\t"
        "lwc1   $f8,       88(%[fb])                            \n\t"
        "lwc1   $f9,       52(%[fb])                            \n\t"
        "madd.s %[sum2],   %[sum2],   $f5,       %[coeff0]      \n\t"
        "madd.s %[sum3],   $f6,       $f7,       %[coeff0]      \n\t"
        "lwc1   $f10,      16(%[fb])                            \n\t"
        "lwc1   $f11,      92(%[fb])                            \n\t"
        "madd.s %[sum1],   %[sum1],   $f7,       %[coeff1]      \n\t"
        "lwc1   $f1,       72(%[fb])                            \n\t"
        "madd.s %[sum3],   %[sum3],   $f8,       %[coeff0]      \n\t"
        "madd.s %[sum4],   $f9,       $f10,      %[coeff0]      \n\t"
        "madd.s %[sum2],   %[sum2],   $f10,      %[coeff1]      \n\t"
        "madd.s %[sum1],   %[sum1],   $f1,       %[coeff1]      \n\t"
        "lwc1   $f4,       76(%[fb])                            \n\t"
        "lwc1   $f8,       20(%[fb])                            \n\t"
        "madd.s %[sum4],   %[sum4],   $f11,      %[coeff0]      \n\t"
        "lwc1   $f11,      24(%[fb])                            \n\t"
        "madd.s %[sum2],   %[sum2],   $f4,       %[coeff1]      \n\t"
        "madd.s %[sum1],   %[sum1],   $f8,       %[coeff2]      \n\t"
        "madd.s %[sum3],   %[sum3],   $f8,       %[coeff1]      \n\t"
        "madd.s %[sum4],   %[sum4],   $f11,      %[coeff1]      \n\t"
        "lwc1   $f7,       64(%[fb])                            \n\t"
        "madd.s %[sum2],   %[sum2],   $f11,      %[coeff2]      \n\t"
        "lwc1   $f10,      68(%[fb])                            \n\t"
        "madd.s %[sum3],   %[sum3],   $f2,       %[coeff1]      \n\t"
        "madd.s %[sum4],   %[sum4],   $f5,       %[coeff1]      \n\t"
        "madd.s %[sum1],   %[sum1],   $f7,       %[coeff2]      \n\t"
        "madd.s %[sum2],   %[sum2],   $f10,      %[coeff2]      \n\t"
        "lwc1   $f2,       28(%[fb])                            \n\t"
        "lwc1   $f5,       32(%[fb])                            \n\t"
        "lwc1   $f8,       56(%[fb])                            \n\t"
        "lwc1   $f11,      60(%[fb])                            \n\t"
        "madd.s %[sum3],   %[sum3],   $f2,       %[coeff2]      \n\t"
        "madd.s %[sum4],   %[sum4],   $f5,       %[coeff2]      \n\t"
        "madd.s %[sum1],   %[sum1],   $f2,       %[coeff3]      \n\t"
        "madd.s %[sum2],   %[sum2],   $f5,       %[coeff3]      \n\t"
        "madd.s %[sum3],   %[sum3],   $f1,       %[coeff2]      \n\t"
        "madd.s %[sum4],   %[sum4],   $f4,       %[coeff2]      \n\t"
        "madd.s %[sum1],   %[sum1],   $f8,       %[coeff3]      \n\t"
        "madd.s %[sum2],   %[sum2],   $f11,      %[coeff3]      \n\t"
        "lwc1   $f1,       36(%[fb])                            \n\t"
        PTR_ADDIU "%[fb],  %[fb],     16                        \n\t"
        "madd.s %[sum4],   %[sum4],   $f0,       %[coeff3]      \n\t"
        "madd.s %[sum3],   %[sum3],   $f1,       %[coeff3]      \n\t"
        "madd.s %[sum1],   %[sum1],   $f1,       %[coeff4]      \n\t"
        "madd.s %[sum2],   %[sum2],   $f0,       %[coeff4]      \n\t"
        "madd.s %[sum4],   %[sum4],   $f10,      %[coeff3]      \n\t"
        "madd.s %[sum3],   %[sum3],   $f7,       %[coeff3]      \n\t"
        "madd.s %[sum1],   %[sum1],   $f6,       %[coeff4]      \n\t"
        "madd.s %[sum2],   %[sum2],   $f9,       %[coeff4]      \n\t"
        "madd.s %[sum4],   %[sum4],   $f6,       %[coeff4]      \n\t"
        "madd.s %[sum3],   %[sum3],   $f3,       %[coeff4]      \n\t"
        "mul.s  %[sum1],   %[sum1],   %[f1]                     \n\t"
        "mul.s  %[sum2],   %[sum2],   %[f1]                     \n\t"
        "madd.s %[sum4],   %[sum4],   $f11,      %[coeff4]      \n\t"
        "madd.s %[sum3],   %[sum3],   $f8,       %[coeff4]      \n\t"
        "swc1   %[sum1],   0(%[hp])                             \n\t"
        "swc1   %[sum2],   4(%[hp])                             \n\t"
        "mul.s  %[sum4],   %[sum4],   %[f1]                     \n\t"
        "mul.s  %[sum3],   %[sum3],   %[f1]                     \n\t"
        "swc1   %[sum4],   12(%[hp])                            \n\t"
        "swc1   %[sum3],   8(%[hp])                             \n\t"
        "bne    %[fb],     %[fb_end], 1b                        \n\t"
        PTR_ADDIU "%[hp],  %[hp],     16                        \n\t"
        ".set   pop                                             \n\t"
        : [sum1]"=&f"(sum1), [sum2]"=&f"(sum2),
          [sum3]"=&f"(sum3), [sum4]"=&f"(sum4),
          [fb]"+r"(fb), [hp]"+r"(hp)
        : [coeff0]"f"(coeff0), [coeff1]"f"(coeff1),
          [coeff2]"f"(coeff2), [coeff3]"f"(coeff3),
          [coeff4]"f"(coeff4), [fb_end]"r"(fb_end), [f1]"f"(f1)
        : "$f0", "$f1", "$f2", "$f3", "$f4", "$f5", "$f6",
          "$f7", "$f8", "$f9", "$f10", "$f11",
          "memory"
    );
}
#define calc_thr_3gpp calc_thr_3gpp_mips
#define psy_hp_filter psy_hp_filter_mips
#endif /* !HAVE_MIPS32R6 && !HAVE_MIPS64R6 */
#endif /* HAVE_INLINE_ASM && HAVE_MIPSFPU */
#endif /* AVCODEC_MIPS_AACPSY_MIPS_H */

View File

@ -1,625 +0,0 @@
/*
* Copyright (c) 2012
* MIPS Technologies, Inc., California.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the MIPS Technologies, Inc., nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE MIPS TECHNOLOGIES, INC. ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE MIPS TECHNOLOGIES, INC. BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* Authors: Djordje Pesut (djordje@mips.com)
* Mirjana Vulin (mvulin@mips.com)
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* Reference: libavcodec/aacsbr.c
*/
#include "libavcodec/aacdec.h"
#include "libavcodec/aacsbr.h"
#include "libavcodec/sbr.h"
#include "libavutil/mem_internal.h"
#include "libavutil/mips/asmdefs.h"
#define ENVELOPE_ADJUSTMENT_OFFSET 2
#if HAVE_INLINE_ASM
#if HAVE_MIPSFPU
/**
 * MIPS version of sbr_lf_gen(): build the low-frequency signal X_low.
 * 1. Zero all of X_low (2560 complex entries, 8 words per asm pass).
 * 2. For k < kx[1]: copy the 32 time slots of W[buf_idx][.][k] into
 *    X_low[k][8..39].  The 256/512/768-byte offsets in the asm are the
 *    per-time-slot stride of the float[2][32][32][2] W array.
 * 3. For k < kx[0]: copy the last 8 slots of the previous window
 *    W[1-buf_idx][24..31][k] into X_low[k][0..7].
 * Always returns 0.
 */
static int sbr_lf_gen_mips(SpectralBandReplication *sbr,
                      float X_low[32][40][2], const float W[2][32][32][2],
                      int buf_idx)
{
    int i, k;
    int temp0, temp1, temp2, temp3, temp4, temp5, temp6, temp7;
    float *p_x_low = &X_low[0][8][0];
    float *p_w = (float*)&W[buf_idx][0][0][0];
    float *p_x1_low = &X_low[0][0][0];
    float *p_w1 = (float*)&W[1-buf_idx][24][0][0];
    float *loop_end=p_x1_low + 2560;
    /* loop unrolled 8 times */
    /* Zero-fill pass; the -10240 rewind in the delay slot resets
     * p_x1_low to the start of X_low for the copy loops below. */
    __asm__ volatile (
        "1:                                                 \n\t"
        "sw     $0,            0(%[p_x1_low])               \n\t"
        "sw     $0,            4(%[p_x1_low])               \n\t"
        "sw     $0,            8(%[p_x1_low])               \n\t"
        "sw     $0,            12(%[p_x1_low])              \n\t"
        "sw     $0,            16(%[p_x1_low])              \n\t"
        "sw     $0,            20(%[p_x1_low])              \n\t"
        "sw     $0,            24(%[p_x1_low])              \n\t"
        "sw     $0,            28(%[p_x1_low])              \n\t"
        PTR_ADDIU "%[p_x1_low],%[p_x1_low],   32            \n\t"
        "bne    %[p_x1_low],   %[loop_end],   1b            \n\t"
        PTR_ADDIU "%[p_x1_low],%[p_x1_low],   -10240        \n\t"
        : [p_x1_low]"+r"(p_x1_low)
        : [loop_end]"r"(loop_end)
        : "memory"
    );
    for (k = 0; k < sbr->kx[1]; k++) {
        for (i = 0; i < 32; i+=4) {
            /* loop unrolled 4 times */
            __asm__ volatile (
                "lw     %[temp0],   0(%[p_w])               \n\t"
                "lw     %[temp1],   4(%[p_w])               \n\t"
                "lw     %[temp2],   256(%[p_w])             \n\t"
                "lw     %[temp3],   260(%[p_w])             \n\t"
                "lw     %[temp4],   512(%[p_w])             \n\t"
                "lw     %[temp5],   516(%[p_w])             \n\t"
                "lw     %[temp6],   768(%[p_w])             \n\t"
                "lw     %[temp7],   772(%[p_w])             \n\t"
                "sw     %[temp0],   0(%[p_x_low])           \n\t"
                "sw     %[temp1],   4(%[p_x_low])           \n\t"
                "sw     %[temp2],   8(%[p_x_low])           \n\t"
                "sw     %[temp3],   12(%[p_x_low])          \n\t"
                "sw     %[temp4],   16(%[p_x_low])          \n\t"
                "sw     %[temp5],   20(%[p_x_low])          \n\t"
                "sw     %[temp6],   24(%[p_x_low])          \n\t"
                "sw     %[temp7],   28(%[p_x_low])          \n\t"
                PTR_ADDIU "%[p_x_low], %[p_x_low],  32      \n\t"
                PTR_ADDIU "%[p_w],     %[p_w],      1024    \n\t"
                : [temp0]"=&r"(temp0), [temp1]"=&r"(temp1),
                  [temp2]"=&r"(temp2), [temp3]"=&r"(temp3),
                  [temp4]"=&r"(temp4), [temp5]"=&r"(temp5),
                  [temp6]"=&r"(temp6), [temp7]"=&r"(temp7),
                  [p_w]"+r"(p_w), [p_x_low]"+r"(p_x_low)
                :
                : "memory"
            );
        }
        /* Skip X_low[k][0..7] of the next subband; rewind p_w to the
         * next k column of the same time slots. */
        p_x_low += 16;
        p_w -= 2046;
    }
    for (k = 0; k < sbr->kx[0]; k++) {
        for (i = 0; i < 2; i++) {
            /* loop unrolled 4 times */
            __asm__ volatile (
                "lw     %[temp0],    0(%[p_w1])             \n\t"
                "lw     %[temp1],    4(%[p_w1])             \n\t"
                "lw     %[temp2],    256(%[p_w1])           \n\t"
                "lw     %[temp3],    260(%[p_w1])           \n\t"
                "lw     %[temp4],    512(%[p_w1])           \n\t"
                "lw     %[temp5],    516(%[p_w1])           \n\t"
                "lw     %[temp6],    768(%[p_w1])           \n\t"
                "lw     %[temp7],    772(%[p_w1])           \n\t"
                "sw     %[temp0],    0(%[p_x1_low])         \n\t"
                "sw     %[temp1],    4(%[p_x1_low])         \n\t"
                "sw     %[temp2],    8(%[p_x1_low])         \n\t"
                "sw     %[temp3],    12(%[p_x1_low])        \n\t"
                "sw     %[temp4],    16(%[p_x1_low])        \n\t"
                "sw     %[temp5],    20(%[p_x1_low])        \n\t"
                "sw     %[temp6],    24(%[p_x1_low])        \n\t"
                "sw     %[temp7],    28(%[p_x1_low])        \n\t"
                PTR_ADDIU "%[p_x1_low], %[p_x1_low], 32     \n\t"
                PTR_ADDIU "%[p_w1],     %[p_w1],     1024   \n\t"
                : [temp0]"=&r"(temp0), [temp1]"=&r"(temp1),
                  [temp2]"=&r"(temp2), [temp3]"=&r"(temp3),
                  [temp4]"=&r"(temp4), [temp5]"=&r"(temp5),
                  [temp6]"=&r"(temp6), [temp7]"=&r"(temp7),
                  [p_w1]"+r"(p_w1), [p_x1_low]"+r"(p_x1_low)
                :
                : "memory"
            );
        }
        p_x1_low += 64;
        p_w1 -= 510;
    }
    return 0;
}
/**
 * MIPS version of sbr_x_gen(): assemble the final signal X from the
 * low band X_low and the high-band outputs Y0 (previous frame) / Y1
 * (current frame).  X is stored split: X[0] holds real parts, X[1]
 * imaginary parts — the 9728-byte offset in the asm is 38*64 floats.
 * First X is cleared; the first i_Temp time slots come from the
 * previous frame (X_low for k < kx[0], Y0 for the patched bands), the
 * remaining slots from the current frame (X_low for k < kx[1], Y1
 * above).  Always returns 0.
 *
 * NOTE(review): when i_Temp == 0 the labelled "2:"/"3:" loops would
 * still execute once if entered; they are guarded by the
 * `if (i_Temp != 0)` check instead — keep that invariant.
 */
static int sbr_x_gen_mips(SpectralBandReplication *sbr, float X[2][38][64],
                     const float Y0[38][64][2], const float Y1[38][64][2],
                     const float X_low[32][40][2], int ch)
{
    int k, i;
    const int i_f = 32;
    int temp0, temp1, temp2, temp3;
    const float *X_low1, *Y01, *Y11;
    float *x1=&X[0][0][0];
    float *j=x1+4864;
    const int i_Temp = FFMAX(2*sbr->data[ch].t_env_num_env_old - i_f, 0);
    /* loop unrolled 8 times */
    /* Zero both planes of X (4864 floats); the -19456 rewind in the
     * delay slot restores x1 to the start of X. */
    __asm__ volatile (
        "1:                                         \n\t"
        "sw     $0,      0(%[x1])                   \n\t"
        "sw     $0,      4(%[x1])                   \n\t"
        "sw     $0,      8(%[x1])                   \n\t"
        "sw     $0,      12(%[x1])                  \n\t"
        "sw     $0,      16(%[x1])                  \n\t"
        "sw     $0,      20(%[x1])                  \n\t"
        "sw     $0,      24(%[x1])                  \n\t"
        "sw     $0,      28(%[x1])                  \n\t"
        PTR_ADDIU "%[x1],%[x1],      32             \n\t"
        "bne    %[x1],   %[j],       1b             \n\t"
        PTR_ADDIU "%[x1],%[x1],      -19456         \n\t"
        : [x1]"+r"(x1)
        : [j]"r"(j)
        : "memory"
    );
    if (i_Temp != 0) {
        /* Previous-frame region: time slots [0, i_Temp). */
        X_low1=&X_low[0][2][0];
        for (k = 0; k < sbr->kx[0]; k++) {
            __asm__ volatile (
                "move   %[i],       $zero                   \n\t"
                "2:                                         \n\t"
                "lw     %[temp0],   0(%[X_low1])            \n\t"
                "lw     %[temp1],   4(%[X_low1])            \n\t"
                "sw     %[temp0],   0(%[x1])                \n\t"
                "sw     %[temp1],   9728(%[x1])             \n\t"
                PTR_ADDIU "%[x1],   %[x1],      256         \n\t"
                PTR_ADDIU "%[X_low1],%[X_low1], 8           \n\t"
                "addiu  %[i],       %[i],       1           \n\t"
                "bne    %[i],       %[i_Temp],  2b          \n\t"
                : [x1]"+r"(x1), [X_low1]"+r"(X_low1), [i]"=&r"(i),
                  [temp0]"=&r"(temp0), [temp1]"=&r"(temp1)
                : [i_Temp]"r"(i_Temp)
                : "memory"
            );
            x1-=(i_Temp<<6)-1;
            X_low1-=(i_Temp<<1)-80;
        }
        x1=&X[0][0][k];
        Y01=(float*)&Y0[32][k][0];
        for (; k < sbr->kx[0] + sbr->m[0]; k++) {
            __asm__ volatile (
                "move   %[i],       $zero                   \n\t"
                "3:                                         \n\t"
                "lw     %[temp0],   0(%[Y01])               \n\t"
                "lw     %[temp1],   4(%[Y01])               \n\t"
                "sw     %[temp0],   0(%[x1])                \n\t"
                "sw     %[temp1],   9728(%[x1])             \n\t"
                PTR_ADDIU "%[x1],   %[x1],      256         \n\t"
                PTR_ADDIU "%[Y01],  %[Y01],     512         \n\t"
                "addiu  %[i],       %[i],       1           \n\t"
                "bne    %[i],       %[i_Temp],  3b          \n\t"
                : [x1]"+r"(x1), [Y01]"+r"(Y01), [i]"=&r"(i),
                  [temp0]"=&r"(temp0), [temp1]"=&r"(temp1)
                : [i_Temp]"r"(i_Temp)
                : "memory"
            );
            x1 -=(i_Temp<<6)-1;
            Y01 -=(i_Temp<<7)-2;
        }
    }
    /* Current-frame region: time slots [i_Temp, 38) from X_low,
     * [i_Temp, 32) from Y1. */
    x1=&X[0][i_Temp][0];
    X_low1=&X_low[0][i_Temp+2][0];
    temp3=38;
    for (k = 0; k < sbr->kx[1]; k++) {
        __asm__ volatile (
            "move   %[i],       %[i_Temp]               \n\t"
            "4:                                         \n\t"
            "lw     %[temp0],   0(%[X_low1])            \n\t"
            "lw     %[temp1],   4(%[X_low1])            \n\t"
            "sw     %[temp0],   0(%[x1])                \n\t"
            "sw     %[temp1],   9728(%[x1])             \n\t"
            PTR_ADDIU "%[x1],   %[x1],      256         \n\t"
            PTR_ADDIU "%[X_low1],%[X_low1], 8           \n\t"
            "addiu  %[i],       %[i],       1           \n\t"
            "bne    %[i],       %[temp3],   4b          \n\t"
            : [x1]"+r"(x1), [X_low1]"+r"(X_low1), [i]"=&r"(i),
              [temp0]"=&r"(temp0), [temp1]"=&r"(temp1),
              [temp2]"=&r"(temp2)
            : [i_Temp]"r"(i_Temp), [temp3]"r"(temp3)
            : "memory"
        );
        x1 -= ((38-i_Temp)<<6)-1;
        X_low1 -= ((38-i_Temp)<<1)- 80;
    }
    x1=&X[0][i_Temp][k];
    Y11=&Y1[i_Temp][k][0];
    temp2=32;
    for (; k < sbr->kx[1] + sbr->m[1]; k++) {
        __asm__ volatile (
            "move   %[i],       %[i_Temp]               \n\t"
            "5:                                         \n\t"
            "lw     %[temp0],   0(%[Y11])               \n\t"
            "lw     %[temp1],   4(%[Y11])               \n\t"
            "sw     %[temp0],   0(%[x1])                \n\t"
            "sw     %[temp1],   9728(%[x1])             \n\t"
            PTR_ADDIU "%[x1],   %[x1],      256         \n\t"
            PTR_ADDIU "%[Y11],  %[Y11],     512         \n\t"
            "addiu  %[i],       %[i],       1           \n\t"
            "bne    %[i],       %[temp2],   5b          \n\t"
            : [x1]"+r"(x1), [Y11]"+r"(Y11), [i]"=&r"(i),
              [temp0]"=&r"(temp0), [temp1]"=&r"(temp1)
            : [i_Temp]"r"(i_Temp), [temp3]"r"(temp3),
              [temp2]"r"(temp2)
            : "memory"
        );
        x1 -= ((32-i_Temp)<<6)-1;
        Y11 -= ((32-i_Temp)<<7)-2;
    }
    return 0;
}
#if !HAVE_MIPS32R6 && !HAVE_MIPS64R6
/**
 * MIPS version of sbr_hf_assemble(): final HF reconstruction stage.
 * Maintains the gain/noise history (g_temp/q_temp), optionally smooths
 * them with the 5-tap h_smooth window (h_SL == 4 unless
 * bs_smoothing_mode), applies the gain filter to X_high via
 * dsp.hf_g_filt, then adds either the noise floor
 * (dsp.hf_apply_noise[indexsine]) or — in transient envelopes
 * (e == e_a[0] or e_a[1]) — sinusoidal components with alternating
 * signs A/B derived from indexsine and the parity of kx.
 * Appears to mirror the generic C implementation — confirm against
 * libavcodec/aacsbr_template.c.
 *
 * Only the history copies and the sinusoid inner loop are in asm; the
 * copies are plain word moves (4-way unrolled plus a scalar tail).
 */
static void sbr_hf_assemble_mips(float Y1[38][64][2],
                            const float X_high[64][40][2],
                            SpectralBandReplication *sbr, SBRData *ch_data,
                            const int e_a[2])
{
    int e, i, j, m;
    const int h_SL = 4 * !sbr->bs_smoothing_mode;
    const int kx = sbr->kx[1];
    const int m_max = sbr->m[1];
    static const float h_smooth[5] = {
        0.33333333333333,
        0.30150283239582,
        0.21816949906249,
        0.11516383427084,
        0.03183050093751,
    };
    float (*g_temp)[48] = ch_data->g_temp, (*q_temp)[48] = ch_data->q_temp;
    int indexnoise = ch_data->f_indexnoise;
    int indexsine = ch_data->f_indexsine;
    float *g_temp1, *q_temp1, *pok, *pok1;
    uint32_t temp1, temp2, temp3, temp4;
    int size = m_max;
    /* (Re)seed the smoothing history on reset, or carry it over from
     * the previous frame's envelope grid. */
    if (sbr->reset) {
        for (i = 0; i < h_SL; i++) {
            memcpy(g_temp[i + 2*ch_data->t_env[0]], sbr->gain[0], m_max * sizeof(sbr->gain[0][0]));
            memcpy(q_temp[i + 2*ch_data->t_env[0]], sbr->q_m[0], m_max * sizeof(sbr->q_m[0][0]));
        }
    } else if (h_SL) {
        memcpy(g_temp[2*ch_data->t_env[0]], g_temp[2*ch_data->t_env_num_env_old], 4*sizeof(g_temp[0]));
        memcpy(q_temp[2*ch_data->t_env[0]], q_temp[2*ch_data->t_env_num_env_old], 4*sizeof(q_temp[0]));
    }
    /* Copy current envelope gains/noise into the history buffers. */
    for (e = 0; e < ch_data->bs_num_env; e++) {
        for (i = 2 * ch_data->t_env[e]; i < 2 * ch_data->t_env[e + 1]; i++) {
            g_temp1 = g_temp[h_SL + i];
            pok = sbr->gain[e];
            q_temp1 = q_temp[h_SL + i];
            pok1 = sbr->q_m[e];
            /* loop unrolled 4 times */
            for (j=0; j<(size>>2); j++) {
                __asm__ volatile (
                    "lw      %[temp1],   0(%[pok])              \n\t"
                    "lw      %[temp2],   4(%[pok])              \n\t"
                    "lw      %[temp3],   8(%[pok])              \n\t"
                    "lw      %[temp4],   12(%[pok])             \n\t"
                    "sw      %[temp1],   0(%[g_temp1])          \n\t"
                    "sw      %[temp2],   4(%[g_temp1])          \n\t"
                    "sw      %[temp3],   8(%[g_temp1])          \n\t"
                    "sw      %[temp4],   12(%[g_temp1])         \n\t"
                    "lw      %[temp1],   0(%[pok1])             \n\t"
                    "lw      %[temp2],   4(%[pok1])             \n\t"
                    "lw      %[temp3],   8(%[pok1])             \n\t"
                    "lw      %[temp4],   12(%[pok1])            \n\t"
                    "sw      %[temp1],   0(%[q_temp1])          \n\t"
                    "sw      %[temp2],   4(%[q_temp1])          \n\t"
                    "sw      %[temp3],   8(%[q_temp1])          \n\t"
                    "sw      %[temp4],   12(%[q_temp1])         \n\t"
                    PTR_ADDIU "%[pok],     %[pok],     16       \n\t"
                    PTR_ADDIU "%[g_temp1], %[g_temp1], 16       \n\t"
                    PTR_ADDIU "%[pok1],    %[pok1],    16       \n\t"
                    PTR_ADDIU "%[q_temp1], %[q_temp1], 16       \n\t"
                    : [temp1]"=&r"(temp1), [temp2]"=&r"(temp2),
                      [temp3]"=&r"(temp3), [temp4]"=&r"(temp4),
                      [pok]"+r"(pok), [g_temp1]"+r"(g_temp1),
                      [pok1]"+r"(pok1), [q_temp1]"+r"(q_temp1)
                    :
                    : "memory"
                );
            }
            /* Scalar tail for m_max not divisible by 4. */
            for (j=0; j<(size&3); j++) {
                __asm__ volatile (
                    "lw      %[temp1],   0(%[pok])              \n\t"
                    "lw      %[temp2],   0(%[pok1])             \n\t"
                    "sw      %[temp1],   0(%[g_temp1])          \n\t"
                    "sw      %[temp2],   0(%[q_temp1])          \n\t"
                    PTR_ADDIU "%[pok],     %[pok],     4        \n\t"
                    PTR_ADDIU "%[g_temp1], %[g_temp1], 4        \n\t"
                    PTR_ADDIU "%[pok1],    %[pok1],    4        \n\t"
                    PTR_ADDIU "%[q_temp1], %[q_temp1], 4        \n\t"
                    : [temp1]"=&r"(temp1), [temp2]"=&r"(temp2),
                      [temp3]"=&r"(temp3), [temp4]"=&r"(temp4),
                      [pok]"+r"(pok), [g_temp1]"+r"(g_temp1),
                      [pok1]"+r"(pok1), [q_temp1]"+r"(q_temp1)
                    :
                    : "memory"
                );
            }
        }
    }
    for (e = 0; e < ch_data->bs_num_env; e++) {
        for (i = 2 * ch_data->t_env[e]; i < 2 * ch_data->t_env[e + 1]; i++) {
            LOCAL_ALIGNED_16(float, g_filt_tab, [48]);
            LOCAL_ALIGNED_16(float, q_filt_tab, [48]);
            float *g_filt, *q_filt;
            if (h_SL && e != e_a[0] && e != e_a[1]) {
                /* Smooth gains/noise over the last h_SL+1 time slots. */
                g_filt = g_filt_tab;
                q_filt = q_filt_tab;
                for (m = 0; m < m_max; m++) {
                    const int idx1 = i + h_SL;
                    g_filt[m] = 0.0f;
                    q_filt[m] = 0.0f;
                    for (j = 0; j <= h_SL; j++) {
                        g_filt[m] += g_temp[idx1 - j][m] * h_smooth[j];
                        q_filt[m] += q_temp[idx1 - j][m] * h_smooth[j];
                    }
                }
            } else {
                g_filt = g_temp[i + h_SL];
                q_filt = q_temp[i];
            }
            sbr->dsp.hf_g_filt(Y1[i] + kx, X_high + kx, g_filt, m_max,
                               i + ENVELOPE_ADJUSTMENT_OFFSET);
            if (e != e_a[0] && e != e_a[1]) {
                sbr->dsp.hf_apply_noise[indexsine](Y1[i] + kx, sbr->s_m[e],
                                                   q_filt, indexnoise,
                                                   kx, m_max);
            } else {
                /* Transient envelope: add sinusoids with alternating
                 * signs A (even m) and B (odd m) to one component
                 * (re or im, selected by idx). */
                int idx = indexsine&1;
                int A = (1-((indexsine+(kx & 1))&2));
                int B = (A^(-idx)) + idx;
                float *out = &Y1[i][kx][idx];
                float *in  = sbr->s_m[e];
                float temp0, temp1, temp2, temp3, temp4, temp5;
                float A_f = (float)A;
                float B_f = (float)B;
                for (m = 0; m+1 < m_max; m+=2) {
                    temp2 = out[0];
                    temp3 = out[2];
                    __asm__ volatile(
                        "lwc1    %[temp0],  0(%[in])                        \n\t"
                        "lwc1    %[temp1],  4(%[in])                        \n\t"
                        "madd.s  %[temp4],  %[temp2],  %[temp0], %[A_f]     \n\t"
                        "madd.s  %[temp5],  %[temp3],  %[temp1], %[B_f]     \n\t"
                        "swc1    %[temp4],  0(%[out])                       \n\t"
                        "swc1    %[temp5],  8(%[out])                       \n\t"
                        PTR_ADDIU "%[in],   %[in],     8                    \n\t"
                        PTR_ADDIU "%[out],  %[out],    16                   \n\t"
                        : [temp0]"=&f" (temp0), [temp1]"=&f"(temp1),
                          [temp4]"=&f" (temp4), [temp5]"=&f"(temp5),
                          [in]"+r"(in), [out]"+r"(out)
                        : [A_f]"f"(A_f), [B_f]"f"(B_f), [temp2]"f"(temp2),
                          [temp3]"f"(temp3)
                        : "memory"
                    );
                }
                if(m_max&1)
                    out[2*m  ] += in[m  ] * A;
            }
            indexnoise = (indexnoise + m_max) & 0x1ff;
            indexsine = (indexsine + 1) & 3;
        }
    }
    ch_data->f_indexnoise = indexnoise;
    ch_data->f_indexsine  = indexsine;
}
/**
 * MIPS FPU version of the SBR high-frequency inverse (covariance) filter.
 * For each of the k0 low-band subbands it computes the second-order linear
 * prediction coefficients alpha0/alpha1 from the autocorrelation of X_low,
 * mirroring the generic sbr_hf_inverse_filter() in libavcodec/aacsbr_template.c.
 *
 * phi layout produced by dsp->autocorrelate (viewed as float phi1[]):
 *   phi1[0..1] = phi[0][0] (r01 re/im), phi1[2..3] = phi[0][1] (r02 re/im),
 *   phi1[4]    = phi[1][0][0] (r11 real), phi1[6..7] = phi[1][1] (r12 re/im),
 *   phi1[10]   = phi[2][1][0] (r22 real) -- offsets below are in bytes (x4).
 */
static void sbr_hf_inverse_filter_mips(SBRDSPContext *dsp,
float (*alpha0)[2], float (*alpha1)[2],
const float X_low[32][40][2], int k0)
{
int k;
float temp0, temp1, temp2, temp3, temp4, temp5, temp6, temp7, c;
float *phi1, *alpha_1, *alpha_0, res1, res2, temp_real, temp_im;
/* Same relaxation constant as the generic C implementation: avoids a
 * division by an exact-zero determinant for pathological input. */
c = 1.000001f;
for (k = 0; k < k0; k++) {
LOCAL_ALIGNED_16(float, phi, [3], [2][2]);
float dk;
phi1 = &phi[0][0][0];
alpha_1 = &alpha1[k][0];
alpha_0 = &alpha0[k][0];
dsp->autocorrelate(X_low[k], phi);
/* dk = r22.re * r11.re - |r12|^2 / c  (determinant of the 2x2 system).
 * Also preloads temp0..temp6 with the phi entries reused by the two
 * asm blocks below -- the register contents flow between blocks via
 * the matching "f" input constraints. */
__asm__ volatile (
"lwc1 %[temp0], 40(%[phi1]) \n\t"
"lwc1 %[temp1], 16(%[phi1]) \n\t"
"lwc1 %[temp2], 24(%[phi1]) \n\t"
"lwc1 %[temp3], 28(%[phi1]) \n\t"
"mul.s %[dk], %[temp0], %[temp1] \n\t"
"lwc1 %[temp4], 0(%[phi1]) \n\t"
"mul.s %[res2], %[temp2], %[temp2] \n\t"
"lwc1 %[temp5], 4(%[phi1]) \n\t"
"madd.s %[res2], %[res2], %[temp3], %[temp3] \n\t"
"lwc1 %[temp6], 8(%[phi1]) \n\t"
"div.s %[res2], %[res2], %[c] \n\t"
"lwc1 %[temp0], 12(%[phi1]) \n\t"
"sub.s %[dk], %[dk], %[res2] \n\t"
: [temp0]"=&f"(temp0), [temp1]"=&f"(temp1), [temp2]"=&f"(temp2),
[temp3]"=&f"(temp3), [temp4]"=&f"(temp4), [temp5]"=&f"(temp5),
[temp6]"=&f"(temp6), [res2]"=&f"(res2), [dk]"=&f"(dk)
: [phi1]"r"(phi1), [c]"f"(c)
: "memory"
);
if (!dk) {
/* Singular system: no second-order prediction possible. */
alpha_1[0] = 0;
alpha_1[1] = 0;
} else {
/* alpha1 = (r01 * r12 - r02 * r11) / dk (complex arithmetic done
 * with madd.s/nmsub.s fused multiply-accumulate). */
__asm__ volatile (
"mul.s %[temp_real], %[temp4], %[temp2] \n\t"
"nmsub.s %[temp_real], %[temp_real], %[temp5], %[temp3] \n\t"
"nmsub.s %[temp_real], %[temp_real], %[temp6], %[temp1] \n\t"
"mul.s %[temp_im], %[temp4], %[temp3] \n\t"
"madd.s %[temp_im], %[temp_im], %[temp5], %[temp2] \n\t"
"nmsub.s %[temp_im], %[temp_im], %[temp0], %[temp1] \n\t"
"div.s %[temp_real], %[temp_real], %[dk] \n\t"
"div.s %[temp_im], %[temp_im], %[dk] \n\t"
"swc1 %[temp_real], 0(%[alpha_1]) \n\t"
"swc1 %[temp_im], 4(%[alpha_1]) \n\t"
: [temp_real]"=&f" (temp_real), [temp_im]"=&f"(temp_im)
: [phi1]"r"(phi1), [temp0]"f"(temp0), [temp1]"f"(temp1),
[temp2]"f"(temp2), [temp3]"f"(temp3), [temp4]"f"(temp4),
[temp5]"f"(temp5), [temp6]"f"(temp6),
[alpha_1]"r"(alpha_1), [dk]"f"(dk)
: "memory"
);
}
if (!phi1[4]) {
/* r11.re == 0: first-order coefficient undefined, zero it. */
alpha_0[0] = 0;
alpha_0[1] = 0;
} else {
/* alpha0 = -(r01 + alpha1 * conj(r12)) / r11.re */
__asm__ volatile (
"lwc1 %[temp6], 0(%[alpha_1]) \n\t"
"lwc1 %[temp7], 4(%[alpha_1]) \n\t"
"mul.s %[temp_real], %[temp6], %[temp2] \n\t"
"add.s %[temp_real], %[temp_real], %[temp4] \n\t"
"madd.s %[temp_real], %[temp_real], %[temp7], %[temp3] \n\t"
"mul.s %[temp_im], %[temp7], %[temp2] \n\t"
"add.s %[temp_im], %[temp_im], %[temp5] \n\t"
"nmsub.s %[temp_im], %[temp_im], %[temp6], %[temp3] \n\t"
"div.s %[temp_real], %[temp_real], %[temp1] \n\t"
"div.s %[temp_im], %[temp_im], %[temp1] \n\t"
"neg.s %[temp_real], %[temp_real] \n\t"
"neg.s %[temp_im], %[temp_im] \n\t"
"swc1 %[temp_real], 0(%[alpha_0]) \n\t"
"swc1 %[temp_im], 4(%[alpha_0]) \n\t"
: [temp_real]"=&f"(temp_real), [temp_im]"=&f"(temp_im),
[temp6]"=&f"(temp6), [temp7]"=&f"(temp7),
[res1]"=&f"(res1), [res2]"=&f"(res2)
: [alpha_1]"r"(alpha_1), [alpha_0]"r"(alpha_0),
[temp0]"f"(temp0), [temp1]"f"(temp1), [temp2]"f"(temp2),
[temp3]"f"(temp3), [temp4]"f"(temp4), [temp5]"f"(temp5)
: "memory"
);
}
/* res1 = |alpha1|^2, res2 = |alpha0|^2 -- stability check below. */
__asm__ volatile (
"lwc1 %[temp1], 0(%[alpha_1]) \n\t"
"lwc1 %[temp2], 4(%[alpha_1]) \n\t"
"lwc1 %[temp_real], 0(%[alpha_0]) \n\t"
"lwc1 %[temp_im], 4(%[alpha_0]) \n\t"
"mul.s %[res1], %[temp1], %[temp1] \n\t"
"madd.s %[res1], %[res1], %[temp2], %[temp2] \n\t"
"mul.s %[res2], %[temp_real], %[temp_real] \n\t"
"madd.s %[res2], %[res2], %[temp_im], %[temp_im] \n\t"
: [temp_real]"=&f"(temp_real), [temp_im]"=&f"(temp_im),
[temp1]"=&f"(temp1), [temp2]"=&f"(temp2),
[res1]"=&f"(res1), [res2]"=&f"(res2)
: [alpha_1]"r"(alpha_1), [alpha_0]"r"(alpha_0)
: "memory"
);
/* Reject unstable predictors (magnitude >= 4), same as generic code. */
if (res1 >= 16.0f || res2 >= 16.0f) {
alpha_1[0] = 0;
alpha_1[1] = 0;
alpha_0[0] = 0;
alpha_0[1] = 0;
}
}
}
#endif /* !HAVE_MIPS32R6 && !HAVE_MIPS64R6 */
#endif /* HAVE_MIPSFPU */
#endif /* HAVE_INLINE_ASM */
/**
 * Install the MIPS-optimized SBR function pointers into the decoder context.
 * Only the variants that the build configuration can actually run are wired
 * up: everything requires inline asm + an FPU; the hf_inverse_filter and
 * hf_assemble versions additionally use madd.s/nmsub.s, which were removed
 * in MIPS Release 6, hence the extra !R6 guard.
 */
void ff_aacsbr_func_ptr_init_mips(AACSBRContext *c)
{
#if HAVE_INLINE_ASM
#if HAVE_MIPSFPU
c->sbr_lf_gen = sbr_lf_gen_mips;
c->sbr_x_gen = sbr_x_gen_mips;
#if !HAVE_MIPS32R6 && !HAVE_MIPS64R6
c->sbr_hf_inverse_filter = sbr_hf_inverse_filter_mips;
c->sbr_hf_assemble = sbr_hf_assemble_mips;
#endif /* !HAVE_MIPS32R6 && !HAVE_MIPS64R6 */
#endif /* HAVE_MIPSFPU */
#endif /* HAVE_INLINE_ASM */
}

View File

@ -1,496 +0,0 @@
/*
* Copyright (c) 2012
* MIPS Technologies, Inc., California.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the MIPS Technologies, Inc., nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE MIPS TECHNOLOGIES, INC. ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE MIPS TECHNOLOGIES, INC. BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* Authors: Djordje Pesut (djordje@mips.com)
* Mirjana Vulin (mvulin@mips.com)
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* Reference: libavcodec/aacsbr.c
*/
#ifndef AVCODEC_MIPS_AACSBR_MIPS_H
#define AVCODEC_MIPS_AACSBR_MIPS_H
#include "libavcodec/aacdec.h"
#include "libavcodec/sbr.h"
#include "libavutil/mips/asmdefs.h"
#if HAVE_INLINE_ASM
/**
 * MIPS version of the SBR QMF analysis filterbank
 * (see sbr_qmf_analysis() in libavcodec/aacsbr_template.c).
 *
 * The two asm loops are plain word-copy memmoves, unrolled 8x with integer
 * lw/sw (floats moved as raw 32-bit words, no FPU needed):
 *   1) shift the 288-sample history x[1024..1311] down to x[0..287]
 *   2) append the 1024 new input samples at x[288..1311]
 * The final loop then runs the 32 QMF analysis steps through the shared
 * SBR DSP helpers and the half-size MDCT.
 */
static void sbr_qmf_analysis_mips(AVFloatDSPContext *fdsp, AVTXContext *mdct, av_tx_fn mdct_fn,
SBRDSPContext *sbrdsp, const float *in, float *x,
float z[320], float W[2][32][32][2], int buf_idx)
{
int i;
float *w0;
float *w1;
int temp0, temp1, temp2, temp3, temp4, temp5, temp6, temp7;
/* Move the 288-float tail of the previous frame to the buffer head:
 * 36 iterations x 8 words = 288 floats. */
w0 = x;
w1 = x + 1024;
for(i = 0; i < 36; i++)
{
/* loop unrolled 8 times */
__asm__ volatile(
"lw %[temp0], 0(%[w1]) \n\t"
"lw %[temp1], 4(%[w1]) \n\t"
"lw %[temp2], 8(%[w1]) \n\t"
"lw %[temp3], 12(%[w1]) \n\t"
"lw %[temp4], 16(%[w1]) \n\t"
"lw %[temp5], 20(%[w1]) \n\t"
"lw %[temp6], 24(%[w1]) \n\t"
"lw %[temp7], 28(%[w1]) \n\t"
"sw %[temp0], 0(%[w0]) \n\t"
"sw %[temp1], 4(%[w0]) \n\t"
"sw %[temp2], 8(%[w0]) \n\t"
"sw %[temp3], 12(%[w0]) \n\t"
"sw %[temp4], 16(%[w0]) \n\t"
"sw %[temp5], 20(%[w0]) \n\t"
"sw %[temp6], 24(%[w0]) \n\t"
"sw %[temp7], 28(%[w0]) \n\t"
PTR_ADDIU " %[w0], %[w0], 32 \n\t"
PTR_ADDIU " %[w1], %[w1], 32 \n\t"
: [w0]"+r"(w0), [w1]"+r"(w1),
[temp0]"=&r"(temp0), [temp1]"=&r"(temp1),
[temp2]"=&r"(temp2), [temp3]"=&r"(temp3),
[temp4]"=&r"(temp4), [temp5]"=&r"(temp5),
[temp6]"=&r"(temp6), [temp7]"=&r"(temp7)
:
: "memory"
);
}
/* Copy the 1024 new input samples after the history:
 * 128 iterations x 8 words = 1024 floats.
 * The cast drops const; the asm only reads through w1. */
w0 = x + 288;
w1 = (float*)in;
for(i = 0; i < 128; i++)
{
/* loop unrolled 8 times */
__asm__ volatile(
"lw %[temp0], 0(%[w1]) \n\t"
"lw %[temp1], 4(%[w1]) \n\t"
"lw %[temp2], 8(%[w1]) \n\t"
"lw %[temp3], 12(%[w1]) \n\t"
"lw %[temp4], 16(%[w1]) \n\t"
"lw %[temp5], 20(%[w1]) \n\t"
"lw %[temp6], 24(%[w1]) \n\t"
"lw %[temp7], 28(%[w1]) \n\t"
"sw %[temp0], 0(%[w0]) \n\t"
"sw %[temp1], 4(%[w0]) \n\t"
"sw %[temp2], 8(%[w0]) \n\t"
"sw %[temp3], 12(%[w0]) \n\t"
"sw %[temp4], 16(%[w0]) \n\t"
"sw %[temp5], 20(%[w0]) \n\t"
"sw %[temp6], 24(%[w0]) \n\t"
"sw %[temp7], 28(%[w0]) \n\t"
PTR_ADDIU " %[w0], %[w0], 32 \n\t"
PTR_ADDIU " %[w1], %[w1], 32 \n\t"
: [w0]"+r"(w0), [w1]"+r"(w1),
[temp0]"=&r"(temp0), [temp1]"=&r"(temp1),
[temp2]"=&r"(temp2), [temp3]"=&r"(temp3),
[temp4]"=&r"(temp4), [temp5]"=&r"(temp5),
[temp6]"=&r"(temp6), [temp7]"=&r"(temp7)
:
: "memory"
);
}
/* Standard QMF analysis: window, fold, pre-shuffle, MDCT, post-shuffle --
 * identical to the generic C path, using the (possibly MIPS-optimized)
 * sbrdsp function pointers. */
for (i = 0; i < 32; i++) { // numTimeSlots*RATE = 16*2 as 960 sample frames
// are not supported
fdsp->vector_fmul_reverse(z, sbr_qmf_window_ds, x, 320);
sbrdsp->sum64x5(z);
sbrdsp->qmf_pre_shuffle(z);
mdct_fn(mdct, z, z+64, sizeof(float));
sbrdsp->qmf_post_shuffle(W[buf_idx][i], z);
x += 32;
}
}
#if HAVE_MIPSFPU
#if !HAVE_MIPS32R6 && !HAVE_MIPS64R6
/**
 * MIPS version of the SBR QMF synthesis filterbank
 * (see sbr_qmf_synthesis() in libavcodec/aacsbr_template.c).
 *
 * For the full-rate case (div == 0) the ten vector_fmul_add() window
 * convolutions per output step are fused into one software-pipelined
 * inline-asm loop: each iteration produces 4 output samples by
 * accumulating 10 window taps each (byte offsets 0/768/1024/... into v,
 * 0/256/512/... into the window table), with the loads of the NEXT
 * iteration interleaved between the madd.s chains to hide latency.
 * The loop runs 15 times (v0_end = v + 60 floats), the epilogue after
 * the "bne" computes the 16th and final group of 4 samples.
 * The downsampled case (div != 0) falls back to the generic fdsp calls.
 */
static void sbr_qmf_synthesis_mips(AVTXContext *mdct, av_tx_fn mdct_fn,
SBRDSPContext *sbrdsp, AVFloatDSPContext *fdsp,
float *out, float X[2][38][64],
float mdct_buf[2][64],
float *v0, int *v_off, const unsigned int div)
{
int i, n;
const float *sbr_qmf_window = div ? sbr_qmf_window_ds : sbr_qmf_window_us;
const int step = 128 >> div;
float *v;
float temp0, temp1, temp2, temp3, temp4, temp5, temp6, temp7, temp8, temp9, temp10, temp11, temp12, temp13;
float temp14, temp15, temp16, temp17, temp18, temp19;
float *vv0, *s0, *dst;
dst = out;
for (i = 0; i < 32; i++) {
/* Ring-buffer management for the synthesis delay line, identical to
 * the generic implementation. */
if (*v_off < step) {
int saved_samples = (1280 - 128) >> div;
memcpy(&v0[SBR_SYNTHESIS_BUF_SIZE - saved_samples], v0, saved_samples * sizeof(float));
*v_off = SBR_SYNTHESIS_BUF_SIZE - saved_samples - step;
} else {
*v_off -= step;
}
v = v0 + *v_off;
/* Inverse transform of the current QMF column into the delay line. */
if (div) {
for (n = 0; n < 32; n++) {
X[0][i][ n] = -X[0][i][n];
X[0][i][32+n] = X[1][i][31-n];
}
mdct_fn(mdct, mdct_buf[0], X[0][i], sizeof(float));
sbrdsp->qmf_deint_neg(v, mdct_buf[0]);
} else {
sbrdsp->neg_odd_64(X[1][i]);
mdct_fn(mdct, mdct_buf[0], X[0][i], sizeof(float));
mdct_fn(mdct, mdct_buf[1], X[1][i], sizeof(float));
sbrdsp->qmf_deint_bfly(v, mdct_buf[1], mdct_buf[0]);
}
if(div == 0)
{
float *v0_end;
vv0 = v;
v0_end = v + 60;
s0 = (float*)sbr_qmf_window;
/* 10 calls of function vector_fmul_add merged into one loop
and loop unrolled 4 times */
__asm__ volatile(
".set push \n\t"
".set noreorder \n\t"
"lwc1 %[temp4], 0(%[v0]) \n\t"
"lwc1 %[temp5], 0(%[s0]) \n\t"
"lwc1 %[temp6], 4(%[v0]) \n\t"
"lwc1 %[temp7], 4(%[s0]) \n\t"
"lwc1 %[temp8], 8(%[v0]) \n\t"
"lwc1 %[temp9], 8(%[s0]) \n\t"
"lwc1 %[temp10], 12(%[v0]) \n\t"
"lwc1 %[temp11], 12(%[s0]) \n\t"
"lwc1 %[temp12], 768(%[v0]) \n\t"
"lwc1 %[temp13], 256(%[s0]) \n\t"
"lwc1 %[temp14], 772(%[v0]) \n\t"
"lwc1 %[temp15], 260(%[s0]) \n\t"
"lwc1 %[temp16], 776(%[v0]) \n\t"
"lwc1 %[temp17], 264(%[s0]) \n\t"
"lwc1 %[temp18], 780(%[v0]) \n\t"
"lwc1 %[temp19], 268(%[s0]) \n\t"
"1: \n\t"
"mul.s %[temp0], %[temp4], %[temp5] \n\t"
"lwc1 %[temp4], 1024(%[v0]) \n\t"
"mul.s %[temp1], %[temp6], %[temp7] \n\t"
"lwc1 %[temp5], 512(%[s0]) \n\t"
"mul.s %[temp2], %[temp8], %[temp9] \n\t"
"lwc1 %[temp6], 1028(%[v0]) \n\t"
"mul.s %[temp3], %[temp10], %[temp11] \n\t"
"lwc1 %[temp7], 516(%[s0]) \n\t"
"madd.s %[temp0], %[temp0], %[temp12], %[temp13] \n\t"
"lwc1 %[temp8], 1032(%[v0]) \n\t"
"madd.s %[temp1], %[temp1], %[temp14], %[temp15] \n\t"
"lwc1 %[temp9], 520(%[s0]) \n\t"
"madd.s %[temp2], %[temp2], %[temp16], %[temp17] \n\t"
"lwc1 %[temp10], 1036(%[v0]) \n\t"
"madd.s %[temp3], %[temp3], %[temp18], %[temp19] \n\t"
"lwc1 %[temp11], 524(%[s0]) \n\t"
"lwc1 %[temp12], 1792(%[v0]) \n\t"
"lwc1 %[temp13], 768(%[s0]) \n\t"
"lwc1 %[temp14], 1796(%[v0]) \n\t"
"lwc1 %[temp15], 772(%[s0]) \n\t"
"lwc1 %[temp16], 1800(%[v0]) \n\t"
"lwc1 %[temp17], 776(%[s0]) \n\t"
"lwc1 %[temp18], 1804(%[v0]) \n\t"
"lwc1 %[temp19], 780(%[s0]) \n\t"
"madd.s %[temp0], %[temp0], %[temp4], %[temp5] \n\t"
"lwc1 %[temp4], 2048(%[v0]) \n\t"
"madd.s %[temp1], %[temp1], %[temp6], %[temp7] \n\t"
"lwc1 %[temp5], 1024(%[s0]) \n\t"
"madd.s %[temp2], %[temp2], %[temp8], %[temp9] \n\t"
"lwc1 %[temp6], 2052(%[v0]) \n\t"
"madd.s %[temp3], %[temp3], %[temp10], %[temp11] \n\t"
"lwc1 %[temp7], 1028(%[s0]) \n\t"
"madd.s %[temp0], %[temp0], %[temp12], %[temp13] \n\t"
"lwc1 %[temp8], 2056(%[v0]) \n\t"
"madd.s %[temp1], %[temp1], %[temp14], %[temp15] \n\t"
"lwc1 %[temp9], 1032(%[s0]) \n\t"
"madd.s %[temp2], %[temp2], %[temp16], %[temp17] \n\t"
"lwc1 %[temp10], 2060(%[v0]) \n\t"
"madd.s %[temp3], %[temp3], %[temp18], %[temp19] \n\t"
"lwc1 %[temp11], 1036(%[s0]) \n\t"
"lwc1 %[temp12], 2816(%[v0]) \n\t"
"lwc1 %[temp13], 1280(%[s0]) \n\t"
"lwc1 %[temp14], 2820(%[v0]) \n\t"
"lwc1 %[temp15], 1284(%[s0]) \n\t"
"lwc1 %[temp16], 2824(%[v0]) \n\t"
"lwc1 %[temp17], 1288(%[s0]) \n\t"
"lwc1 %[temp18], 2828(%[v0]) \n\t"
"lwc1 %[temp19], 1292(%[s0]) \n\t"
"madd.s %[temp0], %[temp0], %[temp4], %[temp5] \n\t"
"lwc1 %[temp4], 3072(%[v0]) \n\t"
"madd.s %[temp1], %[temp1], %[temp6], %[temp7] \n\t"
"lwc1 %[temp5], 1536(%[s0]) \n\t"
"madd.s %[temp2], %[temp2], %[temp8], %[temp9] \n\t"
"lwc1 %[temp6], 3076(%[v0]) \n\t"
"madd.s %[temp3], %[temp3], %[temp10], %[temp11] \n\t"
"lwc1 %[temp7], 1540(%[s0]) \n\t"
"madd.s %[temp0], %[temp0], %[temp12], %[temp13] \n\t"
"lwc1 %[temp8], 3080(%[v0]) \n\t"
"madd.s %[temp1], %[temp1], %[temp14], %[temp15] \n\t"
"lwc1 %[temp9], 1544(%[s0]) \n\t"
"madd.s %[temp2], %[temp2], %[temp16], %[temp17] \n\t"
"lwc1 %[temp10], 3084(%[v0]) \n\t"
"madd.s %[temp3], %[temp3], %[temp18], %[temp19] \n\t"
"lwc1 %[temp11], 1548(%[s0]) \n\t"
"lwc1 %[temp12], 3840(%[v0]) \n\t"
"lwc1 %[temp13], 1792(%[s0]) \n\t"
"lwc1 %[temp14], 3844(%[v0]) \n\t"
"lwc1 %[temp15], 1796(%[s0]) \n\t"
"lwc1 %[temp16], 3848(%[v0]) \n\t"
"lwc1 %[temp17], 1800(%[s0]) \n\t"
"lwc1 %[temp18], 3852(%[v0]) \n\t"
"lwc1 %[temp19], 1804(%[s0]) \n\t"
"madd.s %[temp0], %[temp0], %[temp4], %[temp5] \n\t"
"lwc1 %[temp4], 4096(%[v0]) \n\t"
"madd.s %[temp1], %[temp1], %[temp6], %[temp7] \n\t"
"lwc1 %[temp5], 2048(%[s0]) \n\t"
"madd.s %[temp2], %[temp2], %[temp8], %[temp9] \n\t"
"lwc1 %[temp6], 4100(%[v0]) \n\t"
"madd.s %[temp3], %[temp3], %[temp10], %[temp11] \n\t"
"lwc1 %[temp7], 2052(%[s0]) \n\t"
"madd.s %[temp0], %[temp0], %[temp12], %[temp13] \n\t"
"lwc1 %[temp8], 4104(%[v0]) \n\t"
PTR_ADDIU "%[dst], %[dst], 16 \n\t"
"madd.s %[temp1], %[temp1], %[temp14], %[temp15] \n\t"
"lwc1 %[temp9], 2056(%[s0]) \n\t"
PTR_ADDIU " %[s0], %[s0], 16 \n\t"
"madd.s %[temp2], %[temp2], %[temp16], %[temp17] \n\t"
"lwc1 %[temp10], 4108(%[v0]) \n\t"
PTR_ADDIU " %[v0], %[v0], 16 \n\t"
"madd.s %[temp3], %[temp3], %[temp18], %[temp19] \n\t"
"lwc1 %[temp11], 2044(%[s0]) \n\t"
"lwc1 %[temp12], 4848(%[v0]) \n\t"
"lwc1 %[temp13], 2288(%[s0]) \n\t"
"lwc1 %[temp14], 4852(%[v0]) \n\t"
"lwc1 %[temp15], 2292(%[s0]) \n\t"
"lwc1 %[temp16], 4856(%[v0]) \n\t"
"lwc1 %[temp17], 2296(%[s0]) \n\t"
"lwc1 %[temp18], 4860(%[v0]) \n\t"
"lwc1 %[temp19], 2300(%[s0]) \n\t"
"madd.s %[temp0], %[temp0], %[temp4], %[temp5] \n\t"
"lwc1 %[temp4], 0(%[v0]) \n\t"
"madd.s %[temp1], %[temp1], %[temp6], %[temp7] \n\t"
"lwc1 %[temp5], 0(%[s0]) \n\t"
"madd.s %[temp2], %[temp2], %[temp8], %[temp9] \n\t"
"lwc1 %[temp6], 4(%[v0]) \n\t"
"madd.s %[temp3], %[temp3], %[temp10], %[temp11] \n\t"
"lwc1 %[temp7], 4(%[s0]) \n\t"
"madd.s %[temp0], %[temp0], %[temp12], %[temp13] \n\t"
"lwc1 %[temp8], 8(%[v0]) \n\t"
"madd.s %[temp1], %[temp1], %[temp14], %[temp15] \n\t"
"lwc1 %[temp9], 8(%[s0]) \n\t"
"madd.s %[temp2], %[temp2], %[temp16], %[temp17] \n\t"
"lwc1 %[temp10], 12(%[v0]) \n\t"
"madd.s %[temp3], %[temp3], %[temp18], %[temp19] \n\t"
"lwc1 %[temp11], 12(%[s0]) \n\t"
"lwc1 %[temp12], 768(%[v0]) \n\t"
"lwc1 %[temp13], 256(%[s0]) \n\t"
"lwc1 %[temp14], 772(%[v0]) \n\t"
"lwc1 %[temp15], 260(%[s0]) \n\t"
"lwc1 %[temp16], 776(%[v0]) \n\t"
"lwc1 %[temp17], 264(%[s0]) \n\t"
"lwc1 %[temp18], 780(%[v0]) \n\t"
"lwc1 %[temp19], 268(%[s0]) \n\t"
"swc1 %[temp0], -16(%[dst]) \n\t"
"swc1 %[temp1], -12(%[dst]) \n\t"
"swc1 %[temp2], -8(%[dst]) \n\t"
"bne %[v0], %[v0_end], 1b \n\t"
" swc1 %[temp3], -4(%[dst]) \n\t"
"mul.s %[temp0], %[temp4], %[temp5] \n\t"
"lwc1 %[temp4], 1024(%[v0]) \n\t"
"mul.s %[temp1], %[temp6], %[temp7] \n\t"
"lwc1 %[temp5], 512(%[s0]) \n\t"
"mul.s %[temp2], %[temp8], %[temp9] \n\t"
"lwc1 %[temp6], 1028(%[v0]) \n\t"
"mul.s %[temp3], %[temp10], %[temp11] \n\t"
"lwc1 %[temp7], 516(%[s0]) \n\t"
"madd.s %[temp0], %[temp0], %[temp12], %[temp13] \n\t"
"lwc1 %[temp8], 1032(%[v0]) \n\t"
"madd.s %[temp1], %[temp1], %[temp14], %[temp15] \n\t"
"lwc1 %[temp9], 520(%[s0]) \n\t"
"madd.s %[temp2], %[temp2], %[temp16], %[temp17] \n\t"
"lwc1 %[temp10], 1036(%[v0]) \n\t"
"madd.s %[temp3], %[temp3], %[temp18], %[temp19] \n\t"
"lwc1 %[temp11], 524(%[s0]) \n\t"
"lwc1 %[temp12], 1792(%[v0]) \n\t"
"lwc1 %[temp13], 768(%[s0]) \n\t"
"lwc1 %[temp14], 1796(%[v0]) \n\t"
"lwc1 %[temp15], 772(%[s0]) \n\t"
"lwc1 %[temp16], 1800(%[v0]) \n\t"
"lwc1 %[temp17], 776(%[s0]) \n\t"
"lwc1 %[temp18], 1804(%[v0]) \n\t"
"lwc1 %[temp19], 780(%[s0]) \n\t"
"madd.s %[temp0], %[temp0], %[temp4], %[temp5] \n\t"
"lwc1 %[temp4], 2048(%[v0]) \n\t"
"madd.s %[temp1], %[temp1], %[temp6], %[temp7] \n\t"
"lwc1 %[temp5], 1024(%[s0]) \n\t"
"madd.s %[temp2], %[temp2], %[temp8], %[temp9] \n\t"
"lwc1 %[temp6], 2052(%[v0]) \n\t"
"madd.s %[temp3], %[temp3], %[temp10], %[temp11] \n\t"
"lwc1 %[temp7], 1028(%[s0]) \n\t"
"madd.s %[temp0], %[temp0], %[temp12], %[temp13] \n\t"
"lwc1 %[temp8], 2056(%[v0]) \n\t"
"madd.s %[temp1], %[temp1], %[temp14], %[temp15] \n\t"
"lwc1 %[temp9], 1032(%[s0]) \n\t"
"madd.s %[temp2], %[temp2], %[temp16], %[temp17] \n\t"
"lwc1 %[temp10], 2060(%[v0]) \n\t"
"madd.s %[temp3], %[temp3], %[temp18], %[temp19] \n\t"
"lwc1 %[temp11], 1036(%[s0]) \n\t"
"lwc1 %[temp12], 2816(%[v0]) \n\t"
"lwc1 %[temp13], 1280(%[s0]) \n\t"
"lwc1 %[temp14], 2820(%[v0]) \n\t"
"lwc1 %[temp15], 1284(%[s0]) \n\t"
"lwc1 %[temp16], 2824(%[v0]) \n\t"
"lwc1 %[temp17], 1288(%[s0]) \n\t"
"lwc1 %[temp18], 2828(%[v0]) \n\t"
"lwc1 %[temp19], 1292(%[s0]) \n\t"
"madd.s %[temp0], %[temp0], %[temp4], %[temp5] \n\t"
"lwc1 %[temp4], 3072(%[v0]) \n\t"
"madd.s %[temp1], %[temp1], %[temp6], %[temp7] \n\t"
"lwc1 %[temp5], 1536(%[s0]) \n\t"
"madd.s %[temp2], %[temp2], %[temp8], %[temp9] \n\t"
"lwc1 %[temp6], 3076(%[v0]) \n\t"
"madd.s %[temp3], %[temp3], %[temp10], %[temp11] \n\t"
"lwc1 %[temp7], 1540(%[s0]) \n\t"
"madd.s %[temp0], %[temp0], %[temp12], %[temp13] \n\t"
"lwc1 %[temp8], 3080(%[v0]) \n\t"
"madd.s %[temp1], %[temp1], %[temp14], %[temp15] \n\t"
"lwc1 %[temp9], 1544(%[s0]) \n\t"
"madd.s %[temp2], %[temp2], %[temp16], %[temp17] \n\t"
"lwc1 %[temp10], 3084(%[v0]) \n\t"
"madd.s %[temp3], %[temp3], %[temp18], %[temp19] \n\t"
"lwc1 %[temp11], 1548(%[s0]) \n\t"
"lwc1 %[temp12], 3840(%[v0]) \n\t"
"lwc1 %[temp13], 1792(%[s0]) \n\t"
"lwc1 %[temp14], 3844(%[v0]) \n\t"
"lwc1 %[temp15], 1796(%[s0]) \n\t"
"lwc1 %[temp16], 3848(%[v0]) \n\t"
"lwc1 %[temp17], 1800(%[s0]) \n\t"
"lwc1 %[temp18], 3852(%[v0]) \n\t"
"lwc1 %[temp19], 1804(%[s0]) \n\t"
"madd.s %[temp0], %[temp0], %[temp4], %[temp5] \n\t"
"lwc1 %[temp4], 4096(%[v0]) \n\t"
"madd.s %[temp1], %[temp1], %[temp6], %[temp7] \n\t"
"lwc1 %[temp5], 2048(%[s0]) \n\t"
"madd.s %[temp2], %[temp2], %[temp8], %[temp9] \n\t"
"lwc1 %[temp6], 4100(%[v0]) \n\t"
"madd.s %[temp3], %[temp3], %[temp10], %[temp11] \n\t"
"lwc1 %[temp7], 2052(%[s0]) \n\t"
"madd.s %[temp0], %[temp0], %[temp12], %[temp13] \n\t"
"lwc1 %[temp8], 4104(%[v0]) \n\t"
"madd.s %[temp1], %[temp1], %[temp14], %[temp15] \n\t"
"lwc1 %[temp9], 2056(%[s0]) \n\t"
"madd.s %[temp2], %[temp2], %[temp16], %[temp17] \n\t"
"lwc1 %[temp10], 4108(%[v0]) \n\t"
"madd.s %[temp3], %[temp3], %[temp18], %[temp19] \n\t"
"lwc1 %[temp11], 2060(%[s0]) \n\t"
"lwc1 %[temp12], 4864(%[v0]) \n\t"
"lwc1 %[temp13], 2304(%[s0]) \n\t"
"lwc1 %[temp14], 4868(%[v0]) \n\t"
"lwc1 %[temp15], 2308(%[s0]) \n\t"
"madd.s %[temp0], %[temp0], %[temp4], %[temp5] \n\t"
"lwc1 %[temp16], 4872(%[v0]) \n\t"
"madd.s %[temp1], %[temp1], %[temp6], %[temp7] \n\t"
"lwc1 %[temp17], 2312(%[s0]) \n\t"
"madd.s %[temp2], %[temp2], %[temp8], %[temp9] \n\t"
"lwc1 %[temp18], 4876(%[v0]) \n\t"
"madd.s %[temp3], %[temp3], %[temp10], %[temp11] \n\t"
"lwc1 %[temp19], 2316(%[s0]) \n\t"
"madd.s %[temp0], %[temp0], %[temp12], %[temp13] \n\t"
PTR_ADDIU "%[dst], %[dst], 16 \n\t"
"madd.s %[temp1], %[temp1], %[temp14], %[temp15] \n\t"
"madd.s %[temp2], %[temp2], %[temp16], %[temp17] \n\t"
"madd.s %[temp3], %[temp3], %[temp18], %[temp19] \n\t"
"swc1 %[temp0], -16(%[dst]) \n\t"
"swc1 %[temp1], -12(%[dst]) \n\t"
"swc1 %[temp2], -8(%[dst]) \n\t"
"swc1 %[temp3], -4(%[dst]) \n\t"
".set pop \n\t"
: [dst]"+r"(dst), [v0]"+r"(vv0), [s0]"+r"(s0),
[temp0]"=&f"(temp0), [temp1]"=&f"(temp1), [temp2]"=&f"(temp2),
[temp3]"=&f"(temp3), [temp4]"=&f"(temp4), [temp5]"=&f"(temp5),
[temp6]"=&f"(temp6), [temp7]"=&f"(temp7), [temp8]"=&f"(temp8),
[temp9]"=&f"(temp9), [temp10]"=&f"(temp10), [temp11]"=&f"(temp11),
[temp12]"=&f"(temp12), [temp13]"=&f"(temp13), [temp14]"=&f"(temp14),
[temp15]"=&f"(temp15), [temp16]"=&f"(temp16), [temp17]"=&f"(temp17),
[temp18]"=&f"(temp18), [temp19]"=&f"(temp19)
: [v0_end]"r"(v0_end)
: "memory"
);
}
else
{
/* Downsampled (div != 0) path: use the generic float DSP window
 * convolution, ten taps per 32-sample output block. */
fdsp->vector_fmul (out, v , sbr_qmf_window , 64 >> div);
fdsp->vector_fmul_add(out, v + ( 192 >> div), sbr_qmf_window + ( 64 >> div), out , 64 >> div);
fdsp->vector_fmul_add(out, v + ( 256 >> div), sbr_qmf_window + (128 >> div), out , 64 >> div);
fdsp->vector_fmul_add(out, v + ( 448 >> div), sbr_qmf_window + (192 >> div), out , 64 >> div);
fdsp->vector_fmul_add(out, v + ( 512 >> div), sbr_qmf_window + (256 >> div), out , 64 >> div);
fdsp->vector_fmul_add(out, v + ( 704 >> div), sbr_qmf_window + (320 >> div), out , 64 >> div);
fdsp->vector_fmul_add(out, v + ( 768 >> div), sbr_qmf_window + (384 >> div), out , 64 >> div);
fdsp->vector_fmul_add(out, v + ( 960 >> div), sbr_qmf_window + (448 >> div), out , 64 >> div);
fdsp->vector_fmul_add(out, v + (1024 >> div), sbr_qmf_window + (512 >> div), out , 64 >> div);
fdsp->vector_fmul_add(out, v + (1216 >> div), sbr_qmf_window + (576 >> div), out , 64 >> div);
out += 64 >> div;
}
}
}
#define sbr_qmf_analysis sbr_qmf_analysis_mips
#define sbr_qmf_synthesis sbr_qmf_synthesis_mips
#endif /* !HAVE_MIPS32R6 && !HAVE_MIPS64R6 */
#endif /* HAVE_MIPSFPU */
#endif /* HAVE_INLINE_ASM */
#endif /* AVCODEC_MIPS_AACSBR_MIPS_H */

View File

@ -1,912 +0,0 @@
/*
* Copyright (c) 2012
* MIPS Technologies, Inc., California.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the MIPS Technologies, Inc., nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE MIPS TECHNOLOGIES, INC. ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE MIPS TECHNOLOGIES, INC. BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* Authors: Darko Laus (darko@mips.com)
* Djordje Pesut (djordje@mips.com)
* Mirjana Vulin (mvulin@mips.com)
*
* AAC Spectral Band Replication decoding functions optimized for MIPS
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* Reference: libavcodec/sbrdsp.c
*/
#include "config.h"
#include "libavcodec/sbrdsp.h"
#include "libavutil/mips/asmdefs.h"
#if HAVE_INLINE_ASM
#if HAVE_MIPSFPU
/**
 * MIPS version of SBRDSPContext.qmf_pre_shuffle
 * (see sbr_qmf_pre_shuffle_c() in libavcodec/sbrdsp_template.c):
 *   z[64+2i]   = -z[63-i]   (i = 0..31, negated via sign-bit XOR)
 *   z[64+2i+1] =  z[i+1]
 * Floats are moved with integer lw/sw; negation is done by XORing the
 * IEEE-754 sign bit with 0x80000000 (loaded once via "lui 0x8000"), so no
 * FPU arithmetic is needed. The main loop is unrolled 5 times; the tail
 * after the "bne" handles the last pair plus the z[128..131] copy.
 */
static void sbr_qmf_pre_shuffle_mips(float *z)
{
int Temp1, Temp2, Temp3, Temp4, Temp5, Temp6;
float *z1 = &z[66];
float *z2 = &z[59];
float *z3 = &z[2];
float *z4 = z1 + 60;
/* loop unrolled 5 times */
__asm__ volatile (
"lui %[Temp6], 0x8000 \n\t"
"1: \n\t"
"lw %[Temp1], 0(%[z2]) \n\t"
"lw %[Temp2], 4(%[z2]) \n\t"
"lw %[Temp3], 8(%[z2]) \n\t"
"lw %[Temp4], 12(%[z2]) \n\t"
"lw %[Temp5], 16(%[z2]) \n\t"
"xor %[Temp1], %[Temp1], %[Temp6] \n\t"
"xor %[Temp2], %[Temp2], %[Temp6] \n\t"
"xor %[Temp3], %[Temp3], %[Temp6] \n\t"
"xor %[Temp4], %[Temp4], %[Temp6] \n\t"
"xor %[Temp5], %[Temp5], %[Temp6] \n\t"
PTR_ADDIU "%[z2], %[z2], -20 \n\t"
"sw %[Temp1], 32(%[z1]) \n\t"
"sw %[Temp2], 24(%[z1]) \n\t"
"sw %[Temp3], 16(%[z1]) \n\t"
"sw %[Temp4], 8(%[z1]) \n\t"
"sw %[Temp5], 0(%[z1]) \n\t"
"lw %[Temp1], 0(%[z3]) \n\t"
"lw %[Temp2], 4(%[z3]) \n\t"
"lw %[Temp3], 8(%[z3]) \n\t"
"lw %[Temp4], 12(%[z3]) \n\t"
"lw %[Temp5], 16(%[z3]) \n\t"
"sw %[Temp1], 4(%[z1]) \n\t"
"sw %[Temp2], 12(%[z1]) \n\t"
"sw %[Temp3], 20(%[z1]) \n\t"
"sw %[Temp4], 28(%[z1]) \n\t"
"sw %[Temp5], 36(%[z1]) \n\t"
PTR_ADDIU "%[z3], %[z3], 20 \n\t"
PTR_ADDIU "%[z1], %[z1], 40 \n\t"
"bne %[z1], %[z4], 1b \n\t"
"lw %[Temp1], 132(%[z]) \n\t"
"lw %[Temp2], 128(%[z]) \n\t"
"lw %[Temp3], 0(%[z]) \n\t"
"lw %[Temp4], 4(%[z]) \n\t"
"xor %[Temp1], %[Temp1], %[Temp6] \n\t"
"sw %[Temp1], 504(%[z]) \n\t"
"sw %[Temp2], 508(%[z]) \n\t"
"sw %[Temp3], 256(%[z]) \n\t"
"sw %[Temp4], 260(%[z]) \n\t"
: [Temp1]"=&r"(Temp1), [Temp2]"=&r"(Temp2),
[Temp3]"=&r"(Temp3), [Temp4]"=&r"(Temp4),
[Temp5]"=&r"(Temp5), [Temp6]"=&r"(Temp6),
[z1]"+r"(z1), [z2]"+r"(z2), [z3]"+r"(z3)
: [z4]"r"(z4), [z]"r"(z)
: "memory"
);
}
/**
 * MIPS version of SBRDSPContext.qmf_post_shuffle
 * (see sbr_qmf_post_shuffle_c() in libavcodec/sbrdsp_template.c):
 *   W[i][0] = -z[63-i],  W[i][1] = z[i]   (i = 0..31)
 * Same integer-register technique as pre_shuffle: floats are copied with
 * lw/sw and negated by XORing the IEEE-754 sign bit (0x80000000).
 * Unrolled 4 times, walking z2 downwards from z[60] and z1 upwards.
 */
static void sbr_qmf_post_shuffle_mips(float W[32][2], const float *z)
{
int Temp1, Temp2, Temp3, Temp4, Temp5;
float *W_ptr = (float *)W;
/* const is cast away; the asm only reads through z1/z2. */
float *z1 = (float *)z;
float *z2 = (float *)&z[60];
float *z_end = z1 + 32;
/* loop unrolled 4 times */
__asm__ volatile (
"lui %[Temp5], 0x8000 \n\t"
"1: \n\t"
"lw %[Temp1], 0(%[z2]) \n\t"
"lw %[Temp2], 4(%[z2]) \n\t"
"lw %[Temp3], 8(%[z2]) \n\t"
"lw %[Temp4], 12(%[z2]) \n\t"
"xor %[Temp1], %[Temp1], %[Temp5] \n\t"
"xor %[Temp2], %[Temp2], %[Temp5] \n\t"
"xor %[Temp3], %[Temp3], %[Temp5] \n\t"
"xor %[Temp4], %[Temp4], %[Temp5] \n\t"
PTR_ADDIU "%[z2], %[z2], -16 \n\t"
"sw %[Temp1], 24(%[W_ptr]) \n\t"
"sw %[Temp2], 16(%[W_ptr]) \n\t"
"sw %[Temp3], 8(%[W_ptr]) \n\t"
"sw %[Temp4], 0(%[W_ptr]) \n\t"
"lw %[Temp1], 0(%[z1]) \n\t"
"lw %[Temp2], 4(%[z1]) \n\t"
"lw %[Temp3], 8(%[z1]) \n\t"
"lw %[Temp4], 12(%[z1]) \n\t"
"sw %[Temp1], 4(%[W_ptr]) \n\t"
"sw %[Temp2], 12(%[W_ptr]) \n\t"
"sw %[Temp3], 20(%[W_ptr]) \n\t"
"sw %[Temp4], 28(%[W_ptr]) \n\t"
PTR_ADDIU "%[z1], %[z1], 16 \n\t"
PTR_ADDIU "%[W_ptr],%[W_ptr], 32 \n\t"
"bne %[z1], %[z_end], 1b \n\t"
: [Temp1]"=&r"(Temp1), [Temp2]"=&r"(Temp2),
[Temp3]"=&r"(Temp3), [Temp4]"=&r"(Temp4),
[Temp5]"=&r"(Temp5), [z1]"+r"(z1),
[z2]"+r"(z2), [W_ptr]"+r"(W_ptr)
: [z_end]"r"(z_end)
: "memory"
);
}
#if !HAVE_MIPS32R6 && !HAVE_MIPS64R6
/**
 * MIPS version of SBRDSPContext.sum64x5
 * (see sbr_sum64x5_c() in libavcodec/sbrdsp_template.c):
 *   z[k] += z[k+64] + z[k+128] + z[k+192] + z[k+256]   for k = 0..63
 * (byte offsets 256/512/768/1024 below = 64/128/192/256 floats).
 * Each outer iteration handles 8 consecutive k with add.s chains; the
 * hard-coded registers $f0..$f15 carry loads of the next summand column
 * while the named %[f1..f8] outputs accumulate, which is why they appear
 * in the clobber list. Uses the FPU but no R6-removed instructions, yet
 * sits inside the !R6 guard together with the rest of this block.
 */
static void sbr_sum64x5_mips(float *z)
{
int k;
float *z1;
float f1, f2, f3, f4, f5, f6, f7, f8;
for (k = 0; k < 64; k += 8) {
z1 = &z[k];
/* loop unrolled 8 times */
__asm__ volatile (
"lwc1 $f0, 0(%[z1]) \n\t"
"lwc1 $f1, 256(%[z1]) \n\t"
"lwc1 $f2, 4(%[z1]) \n\t"
"lwc1 $f3, 260(%[z1]) \n\t"
"lwc1 $f4, 8(%[z1]) \n\t"
"add.s %[f1], $f0, $f1 \n\t"
"lwc1 $f5, 264(%[z1]) \n\t"
"add.s %[f2], $f2, $f3 \n\t"
"lwc1 $f6, 12(%[z1]) \n\t"
"lwc1 $f7, 268(%[z1]) \n\t"
"add.s %[f3], $f4, $f5 \n\t"
"lwc1 $f8, 16(%[z1]) \n\t"
"lwc1 $f9, 272(%[z1]) \n\t"
"add.s %[f4], $f6, $f7 \n\t"
"lwc1 $f10, 20(%[z1]) \n\t"
"lwc1 $f11, 276(%[z1]) \n\t"
"add.s %[f5], $f8, $f9 \n\t"
"lwc1 $f12, 24(%[z1]) \n\t"
"lwc1 $f13, 280(%[z1]) \n\t"
"add.s %[f6], $f10, $f11 \n\t"
"lwc1 $f14, 28(%[z1]) \n\t"
"lwc1 $f15, 284(%[z1]) \n\t"
"add.s %[f7], $f12, $f13 \n\t"
"lwc1 $f0, 512(%[z1]) \n\t"
"lwc1 $f1, 516(%[z1]) \n\t"
"add.s %[f8], $f14, $f15 \n\t"
"lwc1 $f2, 520(%[z1]) \n\t"
"add.s %[f1], %[f1], $f0 \n\t"
"add.s %[f2], %[f2], $f1 \n\t"
"lwc1 $f3, 524(%[z1]) \n\t"
"add.s %[f3], %[f3], $f2 \n\t"
"lwc1 $f4, 528(%[z1]) \n\t"
"lwc1 $f5, 532(%[z1]) \n\t"
"add.s %[f4], %[f4], $f3 \n\t"
"lwc1 $f6, 536(%[z1]) \n\t"
"add.s %[f5], %[f5], $f4 \n\t"
"add.s %[f6], %[f6], $f5 \n\t"
"lwc1 $f7, 540(%[z1]) \n\t"
"add.s %[f7], %[f7], $f6 \n\t"
"lwc1 $f0, 768(%[z1]) \n\t"
"lwc1 $f1, 772(%[z1]) \n\t"
"add.s %[f8], %[f8], $f7 \n\t"
"lwc1 $f2, 776(%[z1]) \n\t"
"add.s %[f1], %[f1], $f0 \n\t"
"add.s %[f2], %[f2], $f1 \n\t"
"lwc1 $f3, 780(%[z1]) \n\t"
"add.s %[f3], %[f3], $f2 \n\t"
"lwc1 $f4, 784(%[z1]) \n\t"
"lwc1 $f5, 788(%[z1]) \n\t"
"add.s %[f4], %[f4], $f3 \n\t"
"lwc1 $f6, 792(%[z1]) \n\t"
"add.s %[f5], %[f5], $f4 \n\t"
"add.s %[f6], %[f6], $f5 \n\t"
"lwc1 $f7, 796(%[z1]) \n\t"
"add.s %[f7], %[f7], $f6 \n\t"
"lwc1 $f0, 1024(%[z1]) \n\t"
"lwc1 $f1, 1028(%[z1]) \n\t"
"add.s %[f8], %[f8], $f7 \n\t"
"lwc1 $f2, 1032(%[z1]) \n\t"
"add.s %[f1], %[f1], $f0 \n\t"
"add.s %[f2], %[f2], $f1 \n\t"
"lwc1 $f3, 1036(%[z1]) \n\t"
"add.s %[f3], %[f3], $f2 \n\t"
"lwc1 $f4, 1040(%[z1]) \n\t"
"lwc1 $f5, 1044(%[z1]) \n\t"
"add.s %[f4], %[f4], $f3 \n\t"
"lwc1 $f6, 1048(%[z1]) \n\t"
"add.s %[f5], %[f5], $f4 \n\t"
"add.s %[f6], %[f6], $f5 \n\t"
"lwc1 $f7, 1052(%[z1]) \n\t"
"add.s %[f7], %[f7], $f6 \n\t"
"swc1 %[f1], 0(%[z1]) \n\t"
"swc1 %[f2], 4(%[z1]) \n\t"
"add.s %[f8], %[f8], $f7 \n\t"
"swc1 %[f3], 8(%[z1]) \n\t"
"swc1 %[f4], 12(%[z1]) \n\t"
"swc1 %[f5], 16(%[z1]) \n\t"
"swc1 %[f6], 20(%[z1]) \n\t"
"swc1 %[f7], 24(%[z1]) \n\t"
"swc1 %[f8], 28(%[z1]) \n\t"
: [f1]"=&f"(f1), [f2]"=&f"(f2), [f3]"=&f"(f3),
[f4]"=&f"(f4), [f5]"=&f"(f5), [f6]"=&f"(f6),
[f7]"=&f"(f7), [f8]"=&f"(f8)
: [z1]"r"(z1)
: "$f0", "$f1", "$f2", "$f3", "$f4", "$f5",
"$f6", "$f7", "$f8", "$f9", "$f10", "$f11",
"$f12", "$f13", "$f14", "$f15",
"memory"
);
}
}
/**
 * MIPS version of SBRDSPContext.sum_square
 * (see sbr_sum_square_c() in libavcodec/sbrdsp_template.c):
 * returns sum over i < n of x[i][0]^2 + x[i][1]^2.
 * Two interleaved madd.s accumulators (sum0/sum1) process one complex
 * pair per half-iteration; the loads for the next 4 floats are issued
 * before the accumulates, and the "bne" keeps the last load in its
 * branch delay slot (.set noreorder). The epilogue after the branch
 * folds in the final 4 values.
 * NOTE(review): loop_end = p_x + (n>>1)*4 - 4 assumes n is even and
 * n >= 4 -- true for its SBR callers, not checked here. madd.s fuses
 * the multiply-add, so rounding may differ slightly from the C version.
 */
static float sbr_sum_square_mips(float (*x)[2], int n)
{
float sum0 = 0.0f, sum1 = 0.0f;
float *p_x;
float temp0, temp1, temp2, temp3;
float *loop_end;
p_x = &x[0][0];
loop_end = p_x + (n >> 1)*4 - 4;
__asm__ volatile (
".set push \n\t"
".set noreorder \n\t"
"lwc1 %[temp0], 0(%[p_x]) \n\t"
"lwc1 %[temp1], 4(%[p_x]) \n\t"
"lwc1 %[temp2], 8(%[p_x]) \n\t"
"lwc1 %[temp3], 12(%[p_x]) \n\t"
"1: \n\t"
PTR_ADDIU "%[p_x], %[p_x], 16 \n\t"
"madd.s %[sum0], %[sum0], %[temp0], %[temp0] \n\t"
"lwc1 %[temp0], 0(%[p_x]) \n\t"
"madd.s %[sum1], %[sum1], %[temp1], %[temp1] \n\t"
"lwc1 %[temp1], 4(%[p_x]) \n\t"
"madd.s %[sum0], %[sum0], %[temp2], %[temp2] \n\t"
"lwc1 %[temp2], 8(%[p_x]) \n\t"
"madd.s %[sum1], %[sum1], %[temp3], %[temp3] \n\t"
"bne %[p_x], %[loop_end], 1b \n\t"
" lwc1 %[temp3], 12(%[p_x]) \n\t"
"madd.s %[sum0], %[sum0], %[temp0], %[temp0] \n\t"
"madd.s %[sum1], %[sum1], %[temp1], %[temp1] \n\t"
"madd.s %[sum0], %[sum0], %[temp2], %[temp2] \n\t"
"madd.s %[sum1], %[sum1], %[temp3], %[temp3] \n\t"
".set pop \n\t"
: [temp0]"=&f"(temp0), [temp1]"=&f"(temp1), [temp2]"=&f"(temp2),
[temp3]"=&f"(temp3), [sum0]"+f"(sum0), [sum1]"+f"(sum1),
[p_x]"+r"(p_x)
: [loop_end]"r"(loop_end)
: "memory"
);
return sum0 + sum1;
}
/**
 * QMF deinterleave butterfly: for i in [0,63] writes
 *   v[i]       = src0[i] - src1[63 - i]
 *   v[127 - i] = src0[i] + src1[63 - i]
 * src1 is walked backwards from its last element while src0 and the two
 * halves of v advance/retreat in lockstep. The outer C loop runs 4 times,
 * each pass handling 16 complex-free float positions in one asm block.
 */
static void sbr_qmf_deint_bfly_mips(float *v, const float *src0, const float *src1)
{
int i;
float temp0, temp1, temp2, temp3, temp4, temp5;
float temp6, temp7, temp8, temp9, temp10, temp11;
float *v0 = v;          /* ascending destination (differences) */
float *v1 = &v[127];    /* descending destination (sums) */
float *psrc0 = (float*)src0;
float *psrc1 = (float*)&src1[63]; /* src1 consumed back-to-front */
for (i = 0; i < 4; i++) {
/* loop unrolled 16 times */
__asm__ volatile(
"lwc1 %[temp0], 0(%[src0]) \n\t"
"lwc1 %[temp1], 0(%[src1]) \n\t"
"lwc1 %[temp3], 4(%[src0]) \n\t"
"lwc1 %[temp4], -4(%[src1]) \n\t"
"lwc1 %[temp6], 8(%[src0]) \n\t"
"lwc1 %[temp7], -8(%[src1]) \n\t"
"lwc1 %[temp9], 12(%[src0]) \n\t"
"lwc1 %[temp10], -12(%[src1]) \n\t"
/* sums go to the high half, differences to the low half */
"add.s %[temp2], %[temp0], %[temp1] \n\t"
"add.s %[temp5], %[temp3], %[temp4] \n\t"
"add.s %[temp8], %[temp6], %[temp7] \n\t"
"add.s %[temp11], %[temp9], %[temp10] \n\t"
"sub.s %[temp0], %[temp0], %[temp1] \n\t"
"sub.s %[temp3], %[temp3], %[temp4] \n\t"
"sub.s %[temp6], %[temp6], %[temp7] \n\t"
"sub.s %[temp9], %[temp9], %[temp10] \n\t"
"swc1 %[temp2], 0(%[v1]) \n\t"
"swc1 %[temp0], 0(%[v0]) \n\t"
"swc1 %[temp5], -4(%[v1]) \n\t"
"swc1 %[temp3], 4(%[v0]) \n\t"
"swc1 %[temp8], -8(%[v1]) \n\t"
"swc1 %[temp6], 8(%[v0]) \n\t"
"swc1 %[temp11], -12(%[v1]) \n\t"
"swc1 %[temp9], 12(%[v0]) \n\t"
/* second group of four */
"lwc1 %[temp0], 16(%[src0]) \n\t"
"lwc1 %[temp1], -16(%[src1]) \n\t"
"lwc1 %[temp3], 20(%[src0]) \n\t"
"lwc1 %[temp4], -20(%[src1]) \n\t"
"lwc1 %[temp6], 24(%[src0]) \n\t"
"lwc1 %[temp7], -24(%[src1]) \n\t"
"lwc1 %[temp9], 28(%[src0]) \n\t"
"lwc1 %[temp10], -28(%[src1]) \n\t"
"add.s %[temp2], %[temp0], %[temp1] \n\t"
"add.s %[temp5], %[temp3], %[temp4] \n\t"
"add.s %[temp8], %[temp6], %[temp7] \n\t"
"add.s %[temp11], %[temp9], %[temp10] \n\t"
"sub.s %[temp0], %[temp0], %[temp1] \n\t"
"sub.s %[temp3], %[temp3], %[temp4] \n\t"
"sub.s %[temp6], %[temp6], %[temp7] \n\t"
"sub.s %[temp9], %[temp9], %[temp10] \n\t"
"swc1 %[temp2], -16(%[v1]) \n\t"
"swc1 %[temp0], 16(%[v0]) \n\t"
"swc1 %[temp5], -20(%[v1]) \n\t"
"swc1 %[temp3], 20(%[v0]) \n\t"
"swc1 %[temp8], -24(%[v1]) \n\t"
"swc1 %[temp6], 24(%[v0]) \n\t"
"swc1 %[temp11], -28(%[v1]) \n\t"
"swc1 %[temp9], 28(%[v0]) \n\t"
/* third group of four */
"lwc1 %[temp0], 32(%[src0]) \n\t"
"lwc1 %[temp1], -32(%[src1]) \n\t"
"lwc1 %[temp3], 36(%[src0]) \n\t"
"lwc1 %[temp4], -36(%[src1]) \n\t"
"lwc1 %[temp6], 40(%[src0]) \n\t"
"lwc1 %[temp7], -40(%[src1]) \n\t"
"lwc1 %[temp9], 44(%[src0]) \n\t"
"lwc1 %[temp10], -44(%[src1]) \n\t"
"add.s %[temp2], %[temp0], %[temp1] \n\t"
"add.s %[temp5], %[temp3], %[temp4] \n\t"
"add.s %[temp8], %[temp6], %[temp7] \n\t"
"add.s %[temp11], %[temp9], %[temp10] \n\t"
"sub.s %[temp0], %[temp0], %[temp1] \n\t"
"sub.s %[temp3], %[temp3], %[temp4] \n\t"
"sub.s %[temp6], %[temp6], %[temp7] \n\t"
"sub.s %[temp9], %[temp9], %[temp10] \n\t"
"swc1 %[temp2], -32(%[v1]) \n\t"
"swc1 %[temp0], 32(%[v0]) \n\t"
"swc1 %[temp5], -36(%[v1]) \n\t"
"swc1 %[temp3], 36(%[v0]) \n\t"
"swc1 %[temp8], -40(%[v1]) \n\t"
"swc1 %[temp6], 40(%[v0]) \n\t"
"swc1 %[temp11], -44(%[v1]) \n\t"
"swc1 %[temp9], 44(%[v0]) \n\t"
/* fourth group of four */
"lwc1 %[temp0], 48(%[src0]) \n\t"
"lwc1 %[temp1], -48(%[src1]) \n\t"
"lwc1 %[temp3], 52(%[src0]) \n\t"
"lwc1 %[temp4], -52(%[src1]) \n\t"
"lwc1 %[temp6], 56(%[src0]) \n\t"
"lwc1 %[temp7], -56(%[src1]) \n\t"
"lwc1 %[temp9], 60(%[src0]) \n\t"
"lwc1 %[temp10], -60(%[src1]) \n\t"
"add.s %[temp2], %[temp0], %[temp1] \n\t"
"add.s %[temp5], %[temp3], %[temp4] \n\t"
"add.s %[temp8], %[temp6], %[temp7] \n\t"
"add.s %[temp11], %[temp9], %[temp10] \n\t"
"sub.s %[temp0], %[temp0], %[temp1] \n\t"
"sub.s %[temp3], %[temp3], %[temp4] \n\t"
"sub.s %[temp6], %[temp6], %[temp7] \n\t"
"sub.s %[temp9], %[temp9], %[temp10] \n\t"
"swc1 %[temp2], -48(%[v1]) \n\t"
"swc1 %[temp0], 48(%[v0]) \n\t"
"swc1 %[temp5], -52(%[v1]) \n\t"
"swc1 %[temp3], 52(%[v0]) \n\t"
"swc1 %[temp8], -56(%[v1]) \n\t"
"swc1 %[temp6], 56(%[v0]) \n\t"
"swc1 %[temp11], -60(%[v1]) \n\t"
"swc1 %[temp9], 60(%[v0]) \n\t"
/* advance all four cursors by 16 floats for the next outer pass */
PTR_ADDIU " %[src0], %[src0], 64 \n\t"
PTR_ADDIU " %[src1], %[src1], -64 \n\t"
PTR_ADDIU " %[v0], %[v0], 64 \n\t"
PTR_ADDIU " %[v1], %[v1], -64 \n\t"
: [v0]"+r"(v0), [v1]"+r"(v1), [src0]"+r"(psrc0), [src1]"+r"(psrc1),
[temp0]"=&f"(temp0), [temp1]"=&f"(temp1), [temp2]"=&f"(temp2),
[temp3]"=&f"(temp3), [temp4]"=&f"(temp4), [temp5]"=&f"(temp5),
[temp6]"=&f"(temp6), [temp7]"=&f"(temp7), [temp8]"=&f"(temp8),
[temp9]"=&f"(temp9), [temp10]"=&f"(temp10), [temp11]"=&f"(temp11)
:
:"memory"
);
}
}
/**
 * Complex autocorrelation of x over lags 0, 1 and 2, writing results into
 * phi. Each complex product accumulates a real part via madd.s/msub.s
 * (re*re + im*im, re0*re1 + im0*im1) and an imaginary part
 * (im0*re1 - re0*im1). A prologue handles the first sample pair, the main
 * loop folds in 3 sample pairs per iteration (12 iterations), and an
 * epilogue combines the running sums with the boundary terms and stores
 * them into phi.
 * NOTE(review): the phi store offsets (0/4, 8/12, 16, 24/28, 40) map onto
 * the phi[3][2][2] layout used by the generic sbr_autocorrelate -- confirm
 * against the C reference implementation.
 */
static void sbr_autocorrelate_mips(const float x[40][2], float phi[3][2][2])
{
int i;
float real_sum_0 = 0.0f; /* lag-0 accumulator (pure real) */
float real_sum_1 = 0.0f; /* lag-1 accumulators */
float real_sum_2 = 0.0f; /* lag-2 accumulators */
float imag_sum_1 = 0.0f;
float imag_sum_2 = 0.0f;
float *p_x, *p_phi;
float temp0, temp1, temp2, temp3, temp4, temp5, temp6;
float temp7, temp_r, temp_r1, temp_r2, temp_r3, temp_r4;
p_x = (float*)&x[0][0];
p_phi = &phi[0][0][0];
/* prologue: one sample pair, starting at x[1] (offset 8) */
__asm__ volatile (
"lwc1 %[temp0], 8(%[p_x]) \n\t"
"lwc1 %[temp1], 12(%[p_x]) \n\t"
"lwc1 %[temp2], 16(%[p_x]) \n\t"
"lwc1 %[temp3], 20(%[p_x]) \n\t"
"lwc1 %[temp4], 24(%[p_x]) \n\t"
"lwc1 %[temp5], 28(%[p_x]) \n\t"
"mul.s %[temp_r], %[temp1], %[temp1] \n\t"
"mul.s %[temp_r1], %[temp1], %[temp3] \n\t"
"mul.s %[temp_r2], %[temp1], %[temp2] \n\t"
"mul.s %[temp_r3], %[temp1], %[temp5] \n\t"
"mul.s %[temp_r4], %[temp1], %[temp4] \n\t"
"madd.s %[temp_r], %[temp_r], %[temp0], %[temp0] \n\t"
"madd.s %[temp_r1], %[temp_r1], %[temp0], %[temp2] \n\t"
"msub.s %[temp_r2], %[temp_r2], %[temp0], %[temp3] \n\t"
"madd.s %[temp_r3], %[temp_r3], %[temp0], %[temp4] \n\t"
"msub.s %[temp_r4], %[temp_r4], %[temp0], %[temp5] \n\t"
"add.s %[real_sum_0], %[real_sum_0], %[temp_r] \n\t"
"add.s %[real_sum_1], %[real_sum_1], %[temp_r1] \n\t"
"add.s %[imag_sum_1], %[imag_sum_1], %[temp_r2] \n\t"
"add.s %[real_sum_2], %[real_sum_2], %[temp_r3] \n\t"
"add.s %[imag_sum_2], %[imag_sum_2], %[temp_r4] \n\t"
PTR_ADDIU "%[p_x], %[p_x], 8 \n\t"
: [temp0]"=&f"(temp0), [temp1]"=&f"(temp1), [temp2]"=&f"(temp2),
[temp3]"=&f"(temp3), [temp4]"=&f"(temp4), [temp5]"=&f"(temp5),
[real_sum_0]"+f"(real_sum_0), [real_sum_1]"+f"(real_sum_1),
[imag_sum_1]"+f"(imag_sum_1), [real_sum_2]"+f"(real_sum_2),
[temp_r]"=&f"(temp_r), [temp_r1]"=&f"(temp_r1), [temp_r2]"=&f"(temp_r2),
[temp_r3]"=&f"(temp_r3), [temp_r4]"=&f"(temp_r4),
[p_x]"+r"(p_x), [imag_sum_2]"+f"(imag_sum_2)
:
: "memory"
);
/* main loop: three sample pairs per iteration, p_x advances 24 bytes */
for (i = 0; i < 12; i++) {
__asm__ volatile (
"lwc1 %[temp0], 8(%[p_x]) \n\t"
"lwc1 %[temp1], 12(%[p_x]) \n\t"
"lwc1 %[temp2], 16(%[p_x]) \n\t"
"lwc1 %[temp3], 20(%[p_x]) \n\t"
"lwc1 %[temp4], 24(%[p_x]) \n\t"
"lwc1 %[temp5], 28(%[p_x]) \n\t"
"mul.s %[temp_r], %[temp1], %[temp1] \n\t"
"mul.s %[temp_r1], %[temp1], %[temp3] \n\t"
"mul.s %[temp_r2], %[temp1], %[temp2] \n\t"
"mul.s %[temp_r3], %[temp1], %[temp5] \n\t"
"mul.s %[temp_r4], %[temp1], %[temp4] \n\t"
"madd.s %[temp_r], %[temp_r], %[temp0], %[temp0] \n\t"
"madd.s %[temp_r1], %[temp_r1], %[temp0], %[temp2] \n\t"
"msub.s %[temp_r2], %[temp_r2], %[temp0], %[temp3] \n\t"
"madd.s %[temp_r3], %[temp_r3], %[temp0], %[temp4] \n\t"
"msub.s %[temp_r4], %[temp_r4], %[temp0], %[temp5] \n\t"
"add.s %[real_sum_0], %[real_sum_0], %[temp_r] \n\t"
"add.s %[real_sum_1], %[real_sum_1], %[temp_r1] \n\t"
"add.s %[imag_sum_1], %[imag_sum_1], %[temp_r2] \n\t"
"add.s %[real_sum_2], %[real_sum_2], %[temp_r3] \n\t"
"add.s %[imag_sum_2], %[imag_sum_2], %[temp_r4] \n\t"
/* second sample pair, reusing registers already loaded above */
"lwc1 %[temp0], 32(%[p_x]) \n\t"
"lwc1 %[temp1], 36(%[p_x]) \n\t"
"mul.s %[temp_r], %[temp3], %[temp3] \n\t"
"mul.s %[temp_r1], %[temp3], %[temp5] \n\t"
"mul.s %[temp_r2], %[temp3], %[temp4] \n\t"
"mul.s %[temp_r3], %[temp3], %[temp1] \n\t"
"mul.s %[temp_r4], %[temp3], %[temp0] \n\t"
"madd.s %[temp_r], %[temp_r], %[temp2], %[temp2] \n\t"
"madd.s %[temp_r1], %[temp_r1], %[temp2], %[temp4] \n\t"
"msub.s %[temp_r2], %[temp_r2], %[temp2], %[temp5] \n\t"
"madd.s %[temp_r3], %[temp_r3], %[temp2], %[temp0] \n\t"
"msub.s %[temp_r4], %[temp_r4], %[temp2], %[temp1] \n\t"
"add.s %[real_sum_0], %[real_sum_0], %[temp_r] \n\t"
"add.s %[real_sum_1], %[real_sum_1], %[temp_r1] \n\t"
"add.s %[imag_sum_1], %[imag_sum_1], %[temp_r2] \n\t"
"add.s %[real_sum_2], %[real_sum_2], %[temp_r3] \n\t"
"add.s %[imag_sum_2], %[imag_sum_2], %[temp_r4] \n\t"
/* third sample pair */
"lwc1 %[temp2], 40(%[p_x]) \n\t"
"lwc1 %[temp3], 44(%[p_x]) \n\t"
"mul.s %[temp_r], %[temp5], %[temp5] \n\t"
"mul.s %[temp_r1], %[temp5], %[temp1] \n\t"
"mul.s %[temp_r2], %[temp5], %[temp0] \n\t"
"mul.s %[temp_r3], %[temp5], %[temp3] \n\t"
"mul.s %[temp_r4], %[temp5], %[temp2] \n\t"
"madd.s %[temp_r], %[temp_r], %[temp4], %[temp4] \n\t"
"madd.s %[temp_r1], %[temp_r1], %[temp4], %[temp0] \n\t"
"msub.s %[temp_r2], %[temp_r2], %[temp4], %[temp1] \n\t"
"madd.s %[temp_r3], %[temp_r3], %[temp4], %[temp2] \n\t"
"msub.s %[temp_r4], %[temp_r4], %[temp4], %[temp3] \n\t"
"add.s %[real_sum_0], %[real_sum_0], %[temp_r] \n\t"
"add.s %[real_sum_1], %[real_sum_1], %[temp_r1] \n\t"
"add.s %[imag_sum_1], %[imag_sum_1], %[temp_r2] \n\t"
"add.s %[real_sum_2], %[real_sum_2], %[temp_r3] \n\t"
"add.s %[imag_sum_2], %[imag_sum_2], %[temp_r4] \n\t"
PTR_ADDIU "%[p_x], %[p_x], 24 \n\t"
: [temp0]"=&f"(temp0), [temp1]"=&f"(temp1), [temp2]"=&f"(temp2),
[temp3]"=&f"(temp3), [temp4]"=&f"(temp4), [temp5]"=&f"(temp5),
[real_sum_0]"+f"(real_sum_0), [real_sum_1]"+f"(real_sum_1),
[imag_sum_1]"+f"(imag_sum_1), [real_sum_2]"+f"(real_sum_2),
[temp_r]"=&f"(temp_r), [temp_r1]"=&f"(temp_r1),
[temp_r2]"=&f"(temp_r2), [temp_r3]"=&f"(temp_r3),
[temp_r4]"=&f"(temp_r4), [p_x]"+r"(p_x),
[imag_sum_2]"+f"(imag_sum_2)
:
: "memory"
);
}
/* epilogue: fold in boundary samples (negative offsets reach back to the
 * start of x, since p_x has advanced past it) and store into phi */
__asm__ volatile (
"lwc1 %[temp0], -296(%[p_x]) \n\t"
"lwc1 %[temp1], -292(%[p_x]) \n\t"
"lwc1 %[temp2], 8(%[p_x]) \n\t"
"lwc1 %[temp3], 12(%[p_x]) \n\t"
"lwc1 %[temp4], -288(%[p_x]) \n\t"
"lwc1 %[temp5], -284(%[p_x]) \n\t"
"lwc1 %[temp6], -280(%[p_x]) \n\t"
"lwc1 %[temp7], -276(%[p_x]) \n\t"
"madd.s %[temp_r], %[real_sum_0], %[temp0], %[temp0] \n\t"
"madd.s %[temp_r1], %[real_sum_0], %[temp2], %[temp2] \n\t"
"madd.s %[temp_r2], %[real_sum_1], %[temp0], %[temp4] \n\t"
"madd.s %[temp_r3], %[imag_sum_1], %[temp0], %[temp5] \n\t"
"madd.s %[temp_r], %[temp_r], %[temp1], %[temp1] \n\t"
"madd.s %[temp_r1], %[temp_r1], %[temp3], %[temp3] \n\t"
"madd.s %[temp_r2], %[temp_r2], %[temp1], %[temp5] \n\t"
"nmsub.s %[temp_r3], %[temp_r3], %[temp1], %[temp4] \n\t"
"lwc1 %[temp4], 16(%[p_x]) \n\t"
"lwc1 %[temp5], 20(%[p_x]) \n\t"
"swc1 %[temp_r], 40(%[p_phi]) \n\t"
"swc1 %[temp_r1], 16(%[p_phi]) \n\t"
"swc1 %[temp_r2], 24(%[p_phi]) \n\t"
"swc1 %[temp_r3], 28(%[p_phi]) \n\t"
"madd.s %[temp_r], %[real_sum_1], %[temp2], %[temp4] \n\t"
"madd.s %[temp_r1], %[imag_sum_1], %[temp2], %[temp5] \n\t"
"madd.s %[temp_r2], %[real_sum_2], %[temp0], %[temp6] \n\t"
"madd.s %[temp_r3], %[imag_sum_2], %[temp0], %[temp7] \n\t"
"madd.s %[temp_r], %[temp_r], %[temp3], %[temp5] \n\t"
"nmsub.s %[temp_r1], %[temp_r1], %[temp3], %[temp4] \n\t"
"madd.s %[temp_r2], %[temp_r2], %[temp1], %[temp7] \n\t"
"nmsub.s %[temp_r3], %[temp_r3], %[temp1], %[temp6] \n\t"
"swc1 %[temp_r], 0(%[p_phi]) \n\t"
"swc1 %[temp_r1], 4(%[p_phi]) \n\t"
"swc1 %[temp_r2], 8(%[p_phi]) \n\t"
"swc1 %[temp_r3], 12(%[p_phi]) \n\t"
: [temp0]"=&f"(temp0), [temp1]"=&f"(temp1), [temp2]"=&f"(temp2),
[temp3]"=&f"(temp3), [temp4]"=&f"(temp4), [temp5]"=&f"(temp5),
[temp6]"=&f"(temp6), [temp7]"=&f"(temp7), [temp_r]"=&f"(temp_r),
[real_sum_0]"+f"(real_sum_0), [real_sum_1]"+f"(real_sum_1),
[real_sum_2]"+f"(real_sum_2), [imag_sum_1]"+f"(imag_sum_1),
[temp_r2]"=&f"(temp_r2), [temp_r3]"=&f"(temp_r3),
[temp_r1]"=&f"(temp_r1), [p_phi]"+r"(p_phi),
[imag_sum_2]"+f"(imag_sum_2)
: [p_x]"r"(p_x)
: "memory"
);
}
/**
 * High-frequency generation: for i in [start, end) computes
 *   X_high[i] = X_low[i]
 *             + alpha0*bw   * X_low[i-1]   (complex multiply)
 *             + alpha1*bw^2 * X_low[i-2]   (complex multiply)
 * The bw-scaled coefficients are precomputed once into alpha[4] as
 * {a1.re*bw^2, a1.im*bw^2, a0.re*bw, a0.im*bw}; each loop iteration
 * produces one complex output sample in-place via madd.s/msub.s/nmsub.s.
 * NOTE(review): temp4 is declared and listed as an asm output but never
 * referenced in the asm body -- dead operand, harmless.
 */
static void sbr_hf_gen_mips(float (*X_high)[2], const float (*X_low)[2],
const float alpha0[2], const float alpha1[2],
float bw, int start, int end)
{
float alpha[4];
int i;
float *p_x_low = (float*)&X_low[0][0] + 2*start;  /* current low-band sample */
float *p_x_high = &X_high[0][0] + 2*start;        /* output cursor */
float temp0, temp1, temp2, temp3, temp4, temp5, temp6;
float temp7, temp8, temp9, temp10, temp11, temp12;
/* fold the bandwidth factor into the prediction coefficients up front */
alpha[0] = alpha1[0] * bw * bw;
alpha[1] = alpha1[1] * bw * bw;
alpha[2] = alpha0[0] * bw;
alpha[3] = alpha0[1] * bw;
for (i = start; i < end; i++) {
__asm__ volatile (
/* X_low[i-2] (re,im), X_low[i-1] (re,im), X_low[i] (re,im) */
"lwc1 %[temp0], -16(%[p_x_low]) \n\t"
"lwc1 %[temp1], -12(%[p_x_low]) \n\t"
"lwc1 %[temp2], -8(%[p_x_low]) \n\t"
"lwc1 %[temp3], -4(%[p_x_low]) \n\t"
"lwc1 %[temp5], 0(%[p_x_low]) \n\t"
"lwc1 %[temp6], 4(%[p_x_low]) \n\t"
"lwc1 %[temp7], 0(%[alpha]) \n\t"
"lwc1 %[temp8], 4(%[alpha]) \n\t"
"lwc1 %[temp9], 8(%[alpha]) \n\t"
"lwc1 %[temp10], 12(%[alpha]) \n\t"
PTR_ADDIU "%[p_x_high], %[p_x_high], 8 \n\t"
PTR_ADDIU "%[p_x_low], %[p_x_low], 8 \n\t"
/* real part of the predicted sample */
"mul.s %[temp11], %[temp1], %[temp8] \n\t"
"msub.s %[temp11], %[temp11], %[temp0], %[temp7] \n\t"
"madd.s %[temp11], %[temp11], %[temp2], %[temp9] \n\t"
"nmsub.s %[temp11], %[temp11], %[temp3], %[temp10] \n\t"
"add.s %[temp11], %[temp11], %[temp5] \n\t"
"swc1 %[temp11], -8(%[p_x_high]) \n\t"
/* imaginary part of the predicted sample */
"mul.s %[temp12], %[temp1], %[temp7] \n\t"
"madd.s %[temp12], %[temp12], %[temp0], %[temp8] \n\t"
"madd.s %[temp12], %[temp12], %[temp3], %[temp9] \n\t"
"madd.s %[temp12], %[temp12], %[temp2], %[temp10] \n\t"
"add.s %[temp12], %[temp12], %[temp6] \n\t"
"swc1 %[temp12], -4(%[p_x_high]) \n\t"
: [temp0]"=&f"(temp0), [temp1]"=&f"(temp1), [temp2]"=&f"(temp2),
[temp3]"=&f"(temp3), [temp4]"=&f"(temp4), [temp5]"=&f"(temp5),
[temp6]"=&f"(temp6), [temp7]"=&f"(temp7), [temp8]"=&f"(temp8),
[temp9]"=&f"(temp9), [temp10]"=&f"(temp10), [temp11]"=&f"(temp11),
[temp12]"=&f"(temp12), [p_x_high]"+r"(p_x_high),
[p_x_low]"+r"(p_x_low)
: [alpha]"r"(alpha)
: "memory"
);
}
}
/**
 * Gain filtering: Y[m] = X_high[m][ixh] * g_filt[m] for m in [0, m_max).
 * Each gain scales one complex sample (both re and im). The X_high cursor
 * steps 320 bytes per iteration, i.e. one full row of 40 complex floats
 * (40 * 2 * 4 bytes), walking down column ixh.
 * NOTE(review): assumes m_max >= 1 -- the do-while-style asm loop always
 * runs at least once; verify against callers.
 */
static void sbr_hf_g_filt_mips(float (*Y)[2], const float (*X_high)[40][2],
const float *g_filt, int m_max, intptr_t ixh)
{
const float *p_x, *p_g, *loop_end;
float *p_y;
float temp0, temp1, temp2;
p_g = &g_filt[0];
p_y = &Y[0][0];
p_x = &X_high[0][ixh][0];
loop_end = p_g + m_max;   /* one gain per iteration terminates the loop */
__asm__ volatile(
".set push \n\t"
".set noreorder \n\t"
"1: \n\t"
"lwc1 %[temp0], 0(%[p_g]) \n\t"
"lwc1 %[temp1], 0(%[p_x]) \n\t"
"lwc1 %[temp2], 4(%[p_x]) \n\t"
"mul.s %[temp1], %[temp1], %[temp0] \n\t"
"mul.s %[temp2], %[temp2], %[temp0] \n\t"
PTR_ADDIU "%[p_g], %[p_g], 4 \n\t"
PTR_ADDIU "%[p_x], %[p_x], 320 \n\t"
"swc1 %[temp1], 0(%[p_y]) \n\t"
"swc1 %[temp2], 4(%[p_y]) \n\t"
"bne %[p_g], %[loop_end], 1b \n\t"
PTR_ADDIU "%[p_y], %[p_y], 8 \n\t" /* branch delay slot (noreorder) */
".set pop \n\t"
: [temp0]"=&f"(temp0), [temp1]"=&f"(temp1),
[temp2]"=&f"(temp2), [p_x]"+r"(p_x),
[p_y]"+r"(p_y), [p_g]"+r"(p_g)
: [loop_end]"r"(loop_end)
: "memory"
);
}
/**
 * Noise application, phase variant 0: adds s_m[m] to the real part of
 * Y[m]; when s_m[m] is exactly 0.0f (bit pattern tested via mfc1, so -0.0f
 * would NOT match -- presumably s_m never carries -0.0f, verify), also
 * mixes in q_filt[m] times the noise-table entry.
 * The noise index advances by one per sample, wrapping mod 512 (andi 0x1ff);
 * each table entry is a complex pair, hence the sll-by-3 byte offset.
 * NOTE(review): %[noise] is declared as an input-only "r" operand yet is
 * modified by addiu inside the asm -- works here only because each
 * iteration re-reads the unchanged C variable.
 */
static void sbr_hf_apply_noise_0_mips(float (*Y)[2], const float *s_m,
const float *q_filt, int noise,
int kx, int m_max)
{
int m;
for (m = 0; m < m_max; m++){
float *Y1=&Y[m][0];
float *ff_table;
float y0,y1, temp1, temp2, temp4, temp5;
int temp0, temp3;
const float *s_m1=&s_m[m];
const float *q_filt1= &q_filt[m];
__asm__ volatile(
"lwc1 %[y0], 0(%[Y1]) \n\t"
"lwc1 %[temp1], 0(%[s_m1]) \n\t"
"addiu %[noise], %[noise], 1 \n\t"
"andi %[noise], %[noise], 0x1ff \n\t"
"sll %[temp0], %[noise], 3 \n\t"
PTR_ADDU "%[ff_table],%[ff_sbr_noise_table], %[temp0] \n\t"
"add.s %[y0], %[y0], %[temp1] \n\t"
"mfc1 %[temp3], %[temp1] \n\t" /* raw float bits -> GPR for zero test */
"bne %[temp3], $0, 1f \n\t"
"lwc1 %[y1], 4(%[Y1]) \n\t"
"lwc1 %[temp2], 0(%[q_filt1]) \n\t"
"lwc1 %[temp4], 0(%[ff_table]) \n\t"
"lwc1 %[temp5], 4(%[ff_table]) \n\t"
"madd.s %[y0], %[y0], %[temp2], %[temp4] \n\t"
"madd.s %[y1], %[y1], %[temp2], %[temp5] \n\t"
"swc1 %[y1], 4(%[Y1]) \n\t"
"1: \n\t"
"swc1 %[y0], 0(%[Y1]) \n\t"
: [ff_table]"=&r"(ff_table), [y0]"=&f"(y0), [y1]"=&f"(y1),
[temp0]"=&r"(temp0), [temp1]"=&f"(temp1), [temp2]"=&f"(temp2),
[temp3]"=&r"(temp3), [temp4]"=&f"(temp4), [temp5]"=&f"(temp5)
: [ff_sbr_noise_table]"r"(ff_sbr_noise_table), [noise]"r"(noise),
[Y1]"r"(Y1), [s_m1]"r"(s_m1), [q_filt1]"r"(q_filt1)
: "memory"
);
}
}
/**
 * Noise application, phase variant 1: adds s_m[m] * phi_sign to the
 * imaginary part of Y[m], with phi_sign alternating +/-1 per sample
 * (initial sign depends on kx parity). When s_m[m] is exactly zero
 * (tested via an integer lw of the same address -- a zero bit pattern,
 * so -0.0f would NOT match; presumably s_m never carries -0.0f, verify),
 * q_filt[m] times the noise-table entry is mixed into both components.
 * Noise index wraps mod 512 as in variant 0.
 * NOTE(review): %[noise] is modified despite being an input-only operand;
 * see variant 0.
 */
static void sbr_hf_apply_noise_1_mips(float (*Y)[2], const float *s_m,
const float *q_filt, int noise,
int kx, int m_max)
{
float y0,y1,temp1, temp2, temp4, temp5;
int temp0, temp3, m;
float phi_sign = 1 - 2 * (kx & 1);
for (m = 0; m < m_max; m++) {
float *ff_table;
float *Y1=&Y[m][0];
const float *s_m1=&s_m[m];
const float *q_filt1= &q_filt[m];
__asm__ volatile(
"lwc1 %[y1], 4(%[Y1]) \n\t"
"lwc1 %[temp1], 0(%[s_m1]) \n\t"
"lw %[temp3], 0(%[s_m1]) \n\t" /* same word as integer, for the zero test */
"addiu %[noise], %[noise], 1 \n\t"
"andi %[noise], %[noise], 0x1ff \n\t"
"sll %[temp0], %[noise], 3 \n\t"
PTR_ADDU "%[ff_table],%[ff_sbr_noise_table],%[temp0] \n\t"
"madd.s %[y1], %[y1], %[temp1], %[phi_sign] \n\t"
"bne %[temp3], $0, 1f \n\t"
"lwc1 %[y0], 0(%[Y1]) \n\t"
"lwc1 %[temp2], 0(%[q_filt1]) \n\t"
"lwc1 %[temp4], 0(%[ff_table]) \n\t"
"lwc1 %[temp5], 4(%[ff_table]) \n\t"
"madd.s %[y0], %[y0], %[temp2], %[temp4] \n\t"
"madd.s %[y1], %[y1], %[temp2], %[temp5] \n\t"
"swc1 %[y0], 0(%[Y1]) \n\t"
"1: \n\t"
"swc1 %[y1], 4(%[Y1]) \n\t"
: [ff_table] "=&r" (ff_table), [y0] "=&f" (y0), [y1] "=&f" (y1),
[temp0] "=&r" (temp0), [temp1] "=&f" (temp1), [temp2] "=&f" (temp2),
[temp3] "=&r" (temp3), [temp4] "=&f" (temp4), [temp5] "=&f" (temp5)
: [ff_sbr_noise_table] "r" (ff_sbr_noise_table), [noise] "r" (noise),
[Y1] "r" (Y1), [s_m1] "r" (s_m1), [q_filt1] "r" (q_filt1),
[phi_sign] "f" (phi_sign)
: "memory"
);
phi_sign = -phi_sign;   /* alternate the sinusoid sign every sample */
}
}
/**
 * Noise application, phase variant 2: SUBTRACTS s_m[m] from the real part
 * of Y[m] (mirror of variant 0's add). When s_m[m] is exactly zero (bit
 * pattern moved to a GPR with mfc1; -0.0f would NOT match -- presumably
 * never occurs, verify), q_filt[m] times the noise-table entry is mixed
 * into both components. Noise index wraps mod 512 as in variant 0.
 * NOTE(review): %[noise] is modified despite being an input-only operand;
 * see variant 0.
 */
static void sbr_hf_apply_noise_2_mips(float (*Y)[2], const float *s_m,
const float *q_filt, int noise,
int kx, int m_max)
{
int m, temp0, temp1;
float *ff_table;
float y0, y1, temp2, temp3, temp4, temp5;
for (m = 0; m < m_max; m++) {
float *Y1=&Y[m][0];
const float *s_m1=&s_m[m];
const float *q_filt1= &q_filt[m];
__asm__ volatile(
"lwc1 %[y0], 0(%[Y1]) \n\t"
"lwc1 %[temp3], 0(%[s_m1]) \n\t"
"addiu %[noise], %[noise], 1 \n\t"
"andi %[noise], %[noise], 0x1ff \n\t"
"sll %[temp0], %[noise], 3 \n\t"
PTR_ADDU "%[ff_table],%[ff_sbr_noise_table],%[temp0] \n\t"
"sub.s %[y0], %[y0], %[temp3] \n\t"
"mfc1 %[temp1], %[temp3] \n\t" /* raw float bits -> GPR for zero test */
"bne %[temp1], $0, 1f \n\t"
"lwc1 %[y1], 4(%[Y1]) \n\t"
"lwc1 %[temp2], 0(%[q_filt1]) \n\t"
"lwc1 %[temp4], 0(%[ff_table]) \n\t"
"lwc1 %[temp5], 4(%[ff_table]) \n\t"
"madd.s %[y0], %[y0], %[temp2], %[temp4] \n\t"
"madd.s %[y1], %[y1], %[temp2], %[temp5] \n\t"
"swc1 %[y1], 4(%[Y1]) \n\t"
"1: \n\t"
"swc1 %[y0], 0(%[Y1]) \n\t"
: [temp0]"=&r"(temp0), [temp1]"=&r"(temp1), [y0]"=&f"(y0),
[y1]"=&f"(y1), [ff_table]"=&r"(ff_table),
[temp2]"=&f"(temp2), [temp3]"=&f"(temp3),
[temp4]"=&f"(temp4), [temp5]"=&f"(temp5)
: [ff_sbr_noise_table]"r"(ff_sbr_noise_table), [noise]"r"(noise),
[Y1]"r"(Y1), [s_m1]"r"(s_m1), [q_filt1]"r"(q_filt1)
: "memory"
);
}
}
/**
 * Noise application, phase variant 3: SUBTRACTS s_m[m] * phi_sign from the
 * imaginary part of Y[m] via nmsub.s (mirror of variant 1's add), with
 * phi_sign alternating +/-1 per sample (initial sign from kx parity).
 * When s_m[m] is exactly zero (bit pattern tested via mfc1; -0.0f would
 * NOT match -- presumably never occurs, verify), q_filt[m] times the
 * noise-table entry is mixed into both components. Noise index wraps
 * mod 512 as in variant 0.
 * NOTE(review): %[noise] is modified despite being an input-only operand;
 * see variant 0.
 */
static void sbr_hf_apply_noise_3_mips(float (*Y)[2], const float *s_m,
const float *q_filt, int noise,
int kx, int m_max)
{
float phi_sign = 1 - 2 * (kx & 1);
int m;
for (m = 0; m < m_max; m++) {
float *Y1=&Y[m][0];
float *ff_table;
float y0,y1, temp1, temp2, temp4, temp5;
int temp0, temp3;
const float *s_m1=&s_m[m];
const float *q_filt1= &q_filt[m];
__asm__ volatile(
"lwc1 %[y1], 4(%[Y1]) \n\t"
"lwc1 %[temp1], 0(%[s_m1]) \n\t"
"addiu %[noise], %[noise], 1 \n\t"
"andi %[noise], %[noise], 0x1ff \n\t"
"sll %[temp0], %[noise], 3 \n\t"
PTR_ADDU "%[ff_table],%[ff_sbr_noise_table], %[temp0] \n\t"
"nmsub.s %[y1], %[y1], %[temp1], %[phi_sign] \n\t"
"mfc1 %[temp3], %[temp1] \n\t" /* raw float bits -> GPR for zero test */
"bne %[temp3], $0, 1f \n\t"
"lwc1 %[y0], 0(%[Y1]) \n\t"
"lwc1 %[temp2], 0(%[q_filt1]) \n\t"
"lwc1 %[temp4], 0(%[ff_table]) \n\t"
"lwc1 %[temp5], 4(%[ff_table]) \n\t"
"madd.s %[y0], %[y0], %[temp2], %[temp4] \n\t"
"madd.s %[y1], %[y1], %[temp2], %[temp5] \n\t"
"swc1 %[y0], 0(%[Y1]) \n\t"
"1: \n\t"
"swc1 %[y1], 4(%[Y1]) \n\t"
: [ff_table]"=&r"(ff_table), [y0]"=&f"(y0), [y1]"=&f"(y1),
[temp0]"=&r"(temp0), [temp1]"=&f"(temp1), [temp2]"=&f"(temp2),
[temp3]"=&r"(temp3), [temp4]"=&f"(temp4), [temp5]"=&f"(temp5)
: [ff_sbr_noise_table]"r"(ff_sbr_noise_table), [noise]"r"(noise),
[Y1]"r"(Y1), [s_m1]"r"(s_m1), [q_filt1]"r"(q_filt1),
[phi_sign]"f"(phi_sign)
: "memory"
);
phi_sign = -phi_sign;   /* alternate the sinusoid sign every sample */
}
}
#endif /* !HAVE_MIPS32R6 && !HAVE_MIPS64R6 */
#endif /* HAVE_MIPSFPU */
#endif /* HAVE_INLINE_ASM */
/**
 * Install the MIPS-optimized SBR DSP routines into the context.
 * All hooks require inline-asm support and an FPU; everything except the
 * two QMF shuffle routines additionally requires a pre-R6 ISA (the
 * remaining implementations use branch-delay-slot / madd.s idioms that
 * R6 removed).
 */
void ff_sbrdsp_init_mips(SBRDSPContext *s)
{
#if HAVE_INLINE_ASM
#if HAVE_MIPSFPU
/* available on every MIPS FPU revision */
s->qmf_pre_shuffle  = sbr_qmf_pre_shuffle_mips;
s->qmf_post_shuffle = sbr_qmf_post_shuffle_mips;
#if !HAVE_MIPS32R6 && !HAVE_MIPS64R6
/* analysis / synthesis helpers */
s->sum64x5        = sbr_sum64x5_mips;
s->sum_square     = sbr_sum_square_mips;
s->qmf_deint_bfly = sbr_qmf_deint_bfly_mips;
s->autocorrelate  = sbr_autocorrelate_mips;
/* high-frequency reconstruction */
s->hf_gen    = sbr_hf_gen_mips;
s->hf_g_filt = sbr_hf_g_filt_mips;
/* one noise-phase variant per index */
s->hf_apply_noise[0] = sbr_hf_apply_noise_0_mips;
s->hf_apply_noise[1] = sbr_hf_apply_noise_1_mips;
s->hf_apply_noise[2] = sbr_hf_apply_noise_2_mips;
s->hf_apply_noise[3] = sbr_hf_apply_noise_3_mips;
#endif /* !HAVE_MIPS32R6 && !HAVE_MIPS64R6 */
#endif /* HAVE_MIPSFPU */
#endif /* HAVE_INLINE_ASM */
}

View File

@ -217,6 +217,4 @@ struct SpectralBandReplication {
AACSBRContext c;
};
void ff_aacsbr_func_ptr_init_mips(AACSBRContext *c);
#endif /* AVCODEC_SBR_H */

View File

@ -50,6 +50,5 @@ void ff_sbrdsp_init_arm(SBRDSPContext *s);
void ff_sbrdsp_init_aarch64(SBRDSPContext *s);
void ff_sbrdsp_init_riscv(SBRDSPContext *s);
void ff_sbrdsp_init_x86(SBRDSPContext *s);
void ff_sbrdsp_init_mips(SBRDSPContext *s);
#endif /* AVCODEC_SBRDSP_H */

View File

@ -104,8 +104,6 @@ av_cold void AAC_RENAME(ff_sbrdsp_init)(SBRDSPContext *s)
ff_sbrdsp_init_riscv(s);
#elif ARCH_X86
ff_sbrdsp_init_x86(s);
#elif ARCH_MIPS
ff_sbrdsp_init_mips(s);
#endif
#endif /* !USE_FIXED */
}