VSX version of vpx_mbpost_proc_across_ip
Low bit depth version only. Passes the VpxMbPostProcAcrossIpTest.

VpxMbPostProcAcrossIpTest Speed Test (POWER8 Model 2.1):
  C time   = 188.5ms (±0.2ms)
  VSX time =  65.2ms (±0.1ms) [2.9x]

Change-Id: I1cf72365d94a9d7f1e9323925a87a30e3bd5cfe2
@@ -164,11 +164,21 @@ TEST_P(VpxPostProcDownAndAcrossMbRowTest, CheckCvsAssembly) {
 }
 
 class VpxMbPostProcAcrossIpTest
-    : public ::testing::TestWithParam<VpxMbPostProcAcrossIpFunc> {
+    : public AbstractBench,
+      public ::testing::TestWithParam<VpxMbPostProcAcrossIpFunc> {
  public:
+  VpxMbPostProcAcrossIpTest()
+      : rows(16), cols(16), mbPostProcAcrossIp(GetParam()),
+        src(Buffer<uint8_t>(rows, cols, 8, 8, 17, 8)) {}
   virtual void TearDown() { libvpx_test::ClearSystemState(); }
 
  protected:
+  const int rows;
+  const int cols;
+  const VpxMbPostProcAcrossIpFunc mbPostProcAcrossIp;
+  Buffer<uint8_t> src;
+  void run();
+
   void SetCols(unsigned char *s, int rows, int cols, int src_width) {
     for (int r = 0; r < rows; r++) {
       for (int c = 0; c < cols; c++) {
@@ -197,11 +207,11 @@ class VpxMbPostProcAcrossIpTest
   }
 };
 
-TEST_P(VpxMbPostProcAcrossIpTest, CheckLowFilterOutput) {
-  const int rows = 16;
-  const int cols = 16;
+void VpxMbPostProcAcrossIpTest::run() {
+  mbPostProcAcrossIp(src.TopLeftPixel(), src.stride(), rows, cols, q2mbl(0));
+}
 
-  Buffer<uint8_t> src = Buffer<uint8_t>(cols, rows, 8, 8, 17, 8);
+TEST_P(VpxMbPostProcAcrossIpTest, CheckLowFilterOutput) {
   ASSERT_TRUE(src.Init());
   src.SetPadding(10);
   SetCols(src.TopLeftPixel(), rows, cols, src.stride());
@@ -215,15 +225,11 @@ TEST_P(VpxMbPostProcAcrossIpTest, CheckLowFilterOutput) {
 }
 
 TEST_P(VpxMbPostProcAcrossIpTest, CheckMediumFilterOutput) {
-  const int rows = 16;
-  const int cols = 16;
-
-  Buffer<uint8_t> src = Buffer<uint8_t>(cols, rows, 8, 8, 17, 8);
   ASSERT_TRUE(src.Init());
   src.SetPadding(10);
   SetCols(src.TopLeftPixel(), rows, cols, src.stride());
 
-  static const unsigned char kExpectedOutput[cols] = {
+  static const unsigned char kExpectedOutput[] = {
     2, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 13
   };
 
@@ -232,15 +238,11 @@ TEST_P(VpxMbPostProcAcrossIpTest, CheckMediumFilterOutput) {
 }
 
 TEST_P(VpxMbPostProcAcrossIpTest, CheckHighFilterOutput) {
-  const int rows = 16;
-  const int cols = 16;
-
-  Buffer<uint8_t> src = Buffer<uint8_t>(cols, rows, 8, 8, 17, 8);
   ASSERT_TRUE(src.Init());
   src.SetPadding(10);
   SetCols(src.TopLeftPixel(), rows, cols, src.stride());
 
-  static const unsigned char kExpectedOutput[cols] = {
+  static const unsigned char kExpectedOutput[] = {
     2, 2, 3, 4, 4, 5, 6, 7, 8, 9, 10, 11, 11, 12, 13, 13
   };
 
@@ -254,9 +256,6 @@ TEST_P(VpxMbPostProcAcrossIpTest, CheckHighFilterOutput) {
 }
 
 TEST_P(VpxMbPostProcAcrossIpTest, CheckCvsAssembly) {
-  const int rows = 16;
-  const int cols = 16;
-
   Buffer<uint8_t> c_mem = Buffer<uint8_t>(cols, rows, 8, 8, 17, 8);
   ASSERT_TRUE(c_mem.Init());
   Buffer<uint8_t> asm_mem = Buffer<uint8_t>(cols, rows, 8, 8, 17, 8);
@@ -279,6 +278,16 @@ TEST_P(VpxMbPostProcAcrossIpTest, CheckCvsAssembly) {
   }
 }
 
+TEST_P(VpxMbPostProcAcrossIpTest, DISABLED_Speed) {
+  ASSERT_TRUE(src.Init());
+  src.SetPadding(10);
+
+  SetCols(src.TopLeftPixel(), rows, cols, src.stride());
+
+  runNTimes(100000);
+  printMedian("16x16");
+}
+
 class VpxMbPostProcDownTest
     : public AbstractBench,
      public ::testing::TestWithParam<VpxMbPostProcDownFunc> {
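For context on the DISABLED_Speed test added above: run() is the timed unit of work, while runNTimes() and printMedian() come from the AbstractBench helper that the test fixtures now inherit from. A minimal sketch of what such a harness looks like (illustrative only; the real libvpx helper differs in detail, and the class name here is hypothetical):

// Illustrative benchmarking base class sketch, not the actual libvpx helper.
#include <algorithm>
#include <chrono>
#include <cstdio>
#include <vector>

class AbstractBenchSketch {
 public:
  virtual ~AbstractBenchSketch() {}

  // Repeat the unit of work n times, recording the wall-clock time of each run.
  void runNTimes(int n) {
    times_.clear();
    for (int i = 0; i < n; ++i) {
      const auto start = std::chrono::steady_clock::now();
      run();
      const auto stop = std::chrono::steady_clock::now();
      times_.push_back(
          std::chrono::duration<double, std::micro>(stop - start).count());
    }
  }

  // Report the median run time under the given label.
  void printMedian(const char *title) {
    std::sort(times_.begin(), times_.end());
    printf("[%s] median: %.2f us over %zu runs\n", title,
           times_[times_.size() / 2], times_.size());
  }

 protected:
  virtual void run() = 0;  // overridden by each benchmark with the timed work

 private:
  std::vector<double> times_;
};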
@@ -491,6 +500,10 @@ INSTANTIATE_TEST_CASE_P(MSA, VpxMbPostProcDownTest,
 #endif  // HAVE_MSA
 
 #if HAVE_VSX
+
+INSTANTIATE_TEST_CASE_P(VSX, VpxMbPostProcAcrossIpTest,
+                        ::testing::Values(vpx_mbpost_proc_across_ip_vsx));
+
 INSTANTIATE_TEST_CASE_P(VSX, VpxMbPostProcDownTest,
                         ::testing::Values(vpx_mbpost_proc_down_vsx));
 #endif  // HAVE_VSX
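The hunk below adds the VSX implementation. Its "// C:" comments refer to the scalar algorithm being vectorized: a 15-tap sliding window of sums and sums of squares moves across each row, each pixel becomes (8 + sum + s[c]) >> 4, and the replacement only happens where sumsq * 15 - sum * sum stays below flimit. A rough scalar sketch of one row follows (illustrative only, hypothetical helper name, index clamping instead of the explicit edge padding the real C reference uses):

/* Rough scalar sketch of the per-row sliding-window filter that
 * vpx_mbpost_proc_across_ip_vsx below vectorizes.  Illustration only. */
static void mbpost_across_row_sketch(unsigned char *s, int cols, int flimit) {
  unsigned char d[16];              /* delayed-write ring buffer */
  int sum = s[0] * 9;               /* window slots s[-8..0] read as s[0] */
  int sumsq = s[0] * s[0] * 9 + 16; /* +16 mirrors the ssq init in the VSX code */
  int c;

  for (c = 1; c <= 6; ++c) { /* prime the 15-tap window with columns 1..6 */
    sum += s[c];
    sumsq += s[c] * s[c];
  }

  for (c = 0; c < cols; ++c) {
    const int add = s[c + 7 < cols ? c + 7 : cols - 1]; /* C: s[c + 7] */
    const int drop = s[c >= 8 ? c - 8 : 0];             /* C: s[c - 8] */
    sum += add - drop;
    sumsq += add * add - drop * drop;

    d[c & 15] = s[c];
    if (sumsq * 15 - sum * sum < flimit) {
      d[c & 15] = (unsigned char)((8 + sum + s[c]) >> 4);
    }
    /* write back 8 columns late so the window always reads unfiltered input */
    if (c >= 8) s[c - 8] = d[(c - 8) & 15];
  }
  for (c = cols > 8 ? cols - 8 : 0; c < cols; ++c) s[c] = d[c & 15];
}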
@@ -19,9 +19,170 @@ static const uint8x16_t load_merge = { 0x00, 0x02, 0x04, 0x06, 0x08, 0x0A,
                                        0x0C, 0x0E, 0x18, 0x19, 0x1A, 0x1B,
                                        0x1C, 0x1D, 0x1E, 0x1F };
 
-static const uint8x16_t mask_merge = { 0x00, 0x01, 0x10, 0x11, 0x04, 0x05,
-                                       0x14, 0x15, 0x08, 0x09, 0x18, 0x19,
-                                       0x0C, 0x0D, 0x1C, 0x1D };
+// C: s[c + 7]
+static INLINE int16x8_t next7l_s16(uint8x16_t c) {
+  static const uint8x16_t next7_perm = {
+    0x07, 0x10, 0x08, 0x11, 0x09, 0x12, 0x0A, 0x13,
+    0x0B, 0x14, 0x0C, 0x15, 0x0D, 0x16, 0x0E, 0x17,
+  };
+  return (int16x8_t)vec_perm(c, vec_zeros_u8, next7_perm);
+}
+
+// Slide across window and add.
+static INLINE int16x8_t slide_sum_s16(int16x8_t x) {
+  // x = A B C D E F G H
+  //
+  // 0 A B C D E F G
+  const int16x8_t sum1 = vec_add(x, vec_slo(x, vec_splats((int8_t)(2 << 3))));
+  // 0 0 A B C D E F
+  const int16x8_t sum2 = vec_add(vec_slo(x, vec_splats((int8_t)(4 << 3))),
+                                 // 0 0 0 A B C D E
+                                 vec_slo(x, vec_splats((int8_t)(6 << 3))));
+  // 0 0 0 0 A B C D
+  const int16x8_t sum3 = vec_add(vec_slo(x, vec_splats((int8_t)(8 << 3))),
+                                 // 0 0 0 0 0 A B C
+                                 vec_slo(x, vec_splats((int8_t)(10 << 3))));
+  // 0 0 0 0 0 0 A B
+  const int16x8_t sum4 = vec_add(vec_slo(x, vec_splats((int8_t)(12 << 3))),
+                                 // 0 0 0 0 0 0 0 A
+                                 vec_slo(x, vec_splats((int8_t)(14 << 3))));
+  return vec_add(vec_add(sum1, sum2), vec_add(sum3, sum4));
+}
+
+// Slide across window and add.
+static INLINE int32x4_t slide_sumsq_s32(int32x4_t xsq_even, int32x4_t xsq_odd) {
+  //   0 A C E
+  // + 0 B D F
+  int32x4_t sumsq_1 = vec_add(vec_slo(xsq_even, vec_splats((int8_t)(4 << 3))),
+                              vec_slo(xsq_odd, vec_splats((int8_t)(4 << 3))));
+  //   0 0 A C
+  // + 0 0 B D
+  int32x4_t sumsq_2 = vec_add(vec_slo(xsq_even, vec_splats((int8_t)(8 << 3))),
+                              vec_slo(xsq_odd, vec_splats((int8_t)(8 << 3))));
+  //   0 0 0 A
+  // + 0 0 0 B
+  int32x4_t sumsq_3 = vec_add(vec_slo(xsq_even, vec_splats((int8_t)(12 << 3))),
+                              vec_slo(xsq_odd, vec_splats((int8_t)(12 << 3))));
+  sumsq_1 = vec_add(sumsq_1, xsq_even);
+  sumsq_2 = vec_add(sumsq_2, sumsq_3);
+  return vec_add(sumsq_1, sumsq_2);
+}
+
+// C: (b + sum + val) >> 4
+static INLINE int16x8_t filter_s16(int16x8_t b, int16x8_t sum, int16x8_t val) {
+  return vec_sra(vec_add(vec_add(b, sum), val), vec_splats((uint16_t)4));
+}
+
+// C: sumsq * 15 - sum * sum
+static INLINE bool16x8_t mask_s16(int32x4_t sumsq_even, int32x4_t sumsq_odd,
+                                  int16x8_t sum, int32x4_t lim) {
+  static const uint8x16_t mask_merge = { 0x00, 0x01, 0x10, 0x11, 0x04, 0x05,
+                                         0x14, 0x15, 0x08, 0x09, 0x18, 0x19,
+                                         0x0C, 0x0D, 0x1C, 0x1D };
+  const int32x4_t sumsq_odd_scaled =
+      vec_mul(sumsq_odd, vec_splats((int32_t)15));
+  const int32x4_t sumsq_even_scaled =
+      vec_mul(sumsq_even, vec_splats((int32_t)15));
+  const int32x4_t thres_odd = vec_sub(sumsq_odd_scaled, vec_mulo(sum, sum));
+  const int32x4_t thres_even = vec_sub(sumsq_even_scaled, vec_mule(sum, sum));
+
+  const bool32x4_t mask_odd = vec_cmplt(thres_odd, lim);
+  const bool32x4_t mask_even = vec_cmplt(thres_even, lim);
+  return vec_perm((bool16x8_t)mask_even, (bool16x8_t)mask_odd, mask_merge);
+}
+
+void vpx_mbpost_proc_across_ip_vsx(unsigned char *src, int pitch, int rows,
+                                   int cols, int flimit) {
+  int row, col;
+  const int32x4_t lim = vec_splats(flimit);
+
+  // 8 columns are processed at a time.
+  assert(cols % 8 == 0);
+
+  for (row = 0; row < rows; row++) {
+    // The sum is signed and requires at most 13 bits.
+    // (8 bits + sign) * 15 (4 bits)
+    int16x8_t sum;
+    // The sum of squares requires at most 20 bits.
+    // (16 bits + sign) * 15 (4 bits)
+    int32x4_t sumsq_even, sumsq_odd;
+
+    // Fill left context with first col.
+    int16x8_t left_ctx = vec_splats((int16_t)src[0]);
+    int16_t s = src[0] * 9;
+    int32_t ssq = src[0] * src[0] * 9 + 16;
+
+    // Fill the next 6 columns of the sliding window with cols 2 to 7.
+    for (col = 1; col <= 6; ++col) {
+      s += src[col];
+      ssq += src[col] * src[col];
+    }
+    // Set this sum to every element in the window.
+    sum = vec_splats(s);
+    sumsq_even = vec_splats(ssq);
+    sumsq_odd = vec_splats(ssq);
+
+    for (col = 0; col < cols; col += 8) {
+      bool16x8_t mask;
+      int16x8_t filtered, masked;
+      uint8x16_t out;
+
+      const uint8x16_t val = vec_vsx_ld(0, src + col);
+      const int16x8_t val_high = unpack_to_s16_h(val);
+
+      // C: s[c + 7]
+      const int16x8_t right_ctx = (col + 8 == cols)
+                                      ? vec_splats((int16_t)src[col + 7])
+                                      : next7l_s16(val);
+
+      // C: x = s[c + 7] - s[c - 8];
+      const int16x8_t x = vec_sub(right_ctx, left_ctx);
+      const int32x4_t xsq_even =
+          vec_sub(vec_mule(right_ctx, right_ctx), vec_mule(left_ctx, left_ctx));
+      const int32x4_t xsq_odd =
+          vec_sub(vec_mulo(right_ctx, right_ctx), vec_mulo(left_ctx, left_ctx));
+
+      const int32x4_t sumsq_tmp = slide_sumsq_s32(xsq_even, xsq_odd);
+      // A C E G
+      // 0 B D F
+      // 0 A C E
+      // 0 0 B D
+      // 0 0 A C
+      // 0 0 0 B
+      // 0 0 0 A
+      sumsq_even = vec_add(sumsq_even, sumsq_tmp);
+      // B D F H
+      // A C E G
+      // 0 B D F
+      // 0 A C E
+      // 0 0 B D
+      // 0 0 A C
+      // 0 0 0 B
+      // 0 0 0 A
+      sumsq_odd = vec_add(sumsq_odd, vec_add(sumsq_tmp, xsq_odd));
+
+      sum = vec_add(sum, slide_sum_s16(x));
+
+      // C: (8 + sum + s[c]) >> 4
+      filtered = filter_s16(vec_splats((int16_t)8), sum, val_high);
+      // C: sumsq * 15 - sum * sum
+      mask = mask_s16(sumsq_even, sumsq_odd, sum, lim);
+      masked = vec_sel(val_high, filtered, mask);
+
+      out = vec_perm((uint8x16_t)masked, vec_vsx_ld(0, src + col), load_merge);
+      vec_vsx_st(out, 0, src + col);
+
+      // Update window sum and square sum
+      sum = vec_splat(sum, 7);
+      sumsq_even = vec_splat(sumsq_odd, 3);
+      sumsq_odd = vec_splat(sumsq_odd, 3);
+
+      // C: s[c - 8] (for next iteration)
+      left_ctx = val_high;
+    }
+    src += pitch;
+  }
+}
 
 void vpx_mbpost_proc_down_vsx(uint8_t *dst, int pitch, int rows, int cols,
                               int flimit) {
@@ -66,9 +227,6 @@ void vpx_mbpost_proc_down_vsx(uint8_t *dst, int pitch, int rows, int cols,
 
   for (row = 0; row < rows; row++) {
     int32x4_t d15_even, d15_odd, d0_even, d0_odd;
-    int32x4_t sumsq_odd_scaled, sumsq_even_scaled;
-    int32x4_t thres_odd, thres_even;
-    bool32x4_t mask_odd, mask_even;
     bool16x8_t mask;
     int16x8_t filtered, masked;
     uint8x16_t out;
@@ -96,19 +254,11 @@ void vpx_mbpost_proc_down_vsx(uint8_t *dst, int pitch, int rows, int cols,
     sumsq_odd = vec_add(sumsq_odd, vec_sub(d15_odd, d0_odd));
     sumsq_even = vec_add(sumsq_even, vec_sub(d15_even, d0_even));
 
-    // C: sumsq * 15 - sum * sum
-    sumsq_odd_scaled = vec_mul(sumsq_odd, vec_splats((int32_t)15));
-    sumsq_even_scaled = vec_mul(sumsq_even, vec_splats((int32_t)15));
-    thres_odd = vec_sub(sumsq_odd_scaled, vec_mulo(sum, sum));
-    thres_even = vec_sub(sumsq_even_scaled, vec_mule(sum, sum));
-
     // C: (vpx_rv[(r & 127) + (c & 7)] + sum + s[0]) >> 4
-    filtered = vec_add(vec_add(rv, sum), window[8]);
-    filtered = vec_sra(filtered, vec_splats((uint16_t)4));
+    filtered = filter_s16(rv, sum, window[8]);
 
-    mask_odd = vec_cmplt(thres_odd, lim);
-    mask_even = vec_cmplt(thres_even, lim);
-    mask = vec_perm((bool16x8_t)mask_even, (bool16x8_t)mask_odd, mask_merge);
+    // C: sumsq * 15 - sum * sum
+    mask = mask_s16(sumsq_even, sumsq_odd, sum, lim);
     masked = vec_sel(window[8], filtered, mask);
 
     // TODO(ltrudeau) If cols % 16 == 0, we could just process 16 per
@@ -67,10 +67,13 @@ static const uint8x16_t xxpermdi3_perm = { 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D,
 #endif
 #endif
 
+static const uint8x16_t vec_zeros_u8 = { 0, 0, 0, 0, 0, 0, 0, 0,
+                                         0, 0, 0, 0, 0, 0, 0, 0 };
 static const int16x8_t vec_zeros_s16 = { 0, 0, 0, 0, 0, 0, 0, 0 };
 static const int16x8_t vec_ones_s16 = { 1, 1, 1, 1, 1, 1, 1, 1 };
 static const uint16x8_t vec_ones_u16 = { 1, 1, 1, 1, 1, 1, 1, 1 };
 static const uint32x4_t vec_ones_u32 = { 1, 1, 1, 1 };
+static const int32x4_t vec_zeros_s32 = { 0, 0, 0, 0 };
 static const uint16x8_t vec_shift_sign_s16 = { 15, 15, 15, 15, 15, 15, 15, 15 };
 static const uint32x4_t vec_shift_sign_s32 = { 31, 31, 31, 31 };
 static const uint8x16_t vec_perm64 = { 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D,
@@ -1601,7 +1601,7 @@ if (vpx_config("CONFIG_POSTPROC") eq "yes" || vpx_config("CONFIG_VP9_POSTPROC")
 specialize qw/vpx_mbpost_proc_down sse2 neon msa vsx/;
 
 add_proto qw/void vpx_mbpost_proc_across_ip/, "unsigned char *dst, int pitch, int rows, int cols,int flimit";
-specialize qw/vpx_mbpost_proc_across_ip sse2 neon msa/;
+specialize qw/vpx_mbpost_proc_across_ip sse2 neon msa vsx/;
 
 add_proto qw/void vpx_post_proc_down_and_across_mb_row/, "unsigned char *src, unsigned char *dst, int src_pitch, int dst_pitch, int cols, unsigned char *flimits, int size";
 specialize qw/vpx_post_proc_down_and_across_mb_row sse2 neon msa/;