xref: /third_party/ffmpeg/libavutil/arm/intmath.h (revision cabdff1a)
/*
 * Copyright (c) 2010 Mans Rullgard <mans@mansr.com>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#ifndef AVUTIL_ARM_INTMATH_H
#define AVUTIL_ARM_INTMATH_H

#include <stdint.h>

#include "config.h"
#include "libavutil/attributes.h"

#if HAVE_INLINE_ASM

#if HAVE_ARMV6_INLINE

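/*
 * ARMv6 replacements for the generic clip/saturate helpers from
 * libavutil/common.h; each one maps to a single saturating instruction.
 */

/* USAT #8: clip a to the unsigned 8-bit range [0, 255]. */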
#define av_clip_uint8 av_clip_uint8_arm
static av_always_inline av_const int av_clip_uint8_arm(int a)
{
    int x;
    __asm__ ("usat %0, #8,  %1" : "=r"(x) : "r"(a));
    return x;
}

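/* SSAT #8: clip a to the signed 8-bit range [-128, 127]. */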
#define av_clip_int8 av_clip_int8_arm
static av_always_inline av_const int av_clip_int8_arm(int a)
{
    int x;
    __asm__ ("ssat %0, #8,  %1" : "=r"(x) : "r"(a));
    return x;
}

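/* USAT #16: clip a to the unsigned 16-bit range [0, 65535]. */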
#define av_clip_uint16 av_clip_uint16_arm
static av_always_inline av_const int av_clip_uint16_arm(int a)
{
    int x;
    __asm__ ("usat %0, #16, %1" : "=r"(x) : "r"(a));
    return x;
}

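/* SSAT #16: clip a to the signed 16-bit range [-32768, 32767]. */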
#define av_clip_int16 av_clip_int16_arm
static av_always_inline av_const int av_clip_int16_arm(int a)
{
    int x;
    __asm__ ("ssat %0, #16, %1" : "=r"(x) : "r"(a));
    return x;
}

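/* SSAT with a width of p+1 bits: clip a to [-(1 << p), (1 << p) - 1].
 * The "i" constraint requires p to be a compile-time constant. */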
#define av_clip_intp2 av_clip_intp2_arm
static av_always_inline av_const int av_clip_intp2_arm(int a, int p)
{
    unsigned x;
    __asm__ ("ssat %0, %2, %1" : "=r"(x) : "r"(a), "i"(p+1));
    return x;
}

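/* USAT with a width of p bits: clip a to [0, (1 << p) - 1].
 * As above, p must be a compile-time constant. */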
#define av_clip_uintp2 av_clip_uintp2_arm
static av_always_inline av_const unsigned av_clip_uintp2_arm(int a, int p)
{
    unsigned x;
    __asm__ ("usat %0, %2, %1" : "=r"(x) : "r"(a), "i"(p));
    return x;
}

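/* QADD: signed 32-bit addition, saturating to [INT32_MIN, INT32_MAX]
 * instead of wrapping on overflow. */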
#define av_sat_add32 av_sat_add32_arm
static av_always_inline int av_sat_add32_arm(int a, int b)
{
    int r;
    __asm__ ("qadd %0, %1, %2" : "=r"(r) : "r"(a), "r"(b));
    return r;
}

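/* QDADD: a + 2*b, with both the doubling and the addition saturated
 * to the signed 32-bit range. */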
#define av_sat_dadd32 av_sat_dadd32_arm
static av_always_inline int av_sat_dadd32_arm(int a, int b)
{
    int r;
    __asm__ ("qdadd %0, %1, %2" : "=r"(r) : "r"(a), "r"(b));
    return r;
}

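/* QSUB: signed 32-bit subtraction, saturating to [INT32_MIN, INT32_MAX]. */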
#define av_sat_sub32 av_sat_sub32_arm
static av_always_inline int av_sat_sub32_arm(int a, int b)
{
    int r;
    __asm__ ("qsub %0, %1, %2" : "=r"(r) : "r"(a), "r"(b));
    return r;
}

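/* QDSUB: a - 2*b, with both the doubling and the subtraction saturated
 * to the signed 32-bit range. */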
#define av_sat_dsub32 av_sat_dsub32_arm
static av_always_inline int av_sat_dsub32_arm(int a, int b)
{
    int r;
    __asm__ ("qdsub %0, %1, %2" : "=r"(r) : "r"(a), "r"(b));
    return r;
}

#endif /* HAVE_ARMV6_INLINE */

#if HAVE_ASM_MOD_Q

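/* Clip a 64-bit value to [INT32_MIN, INT32_MAX].  %Q2/%R2 select the low
 * and high 32-bit halves of the 64-bit operand, which is what
 * HAVE_ASM_MOD_Q guards.  hi + (lo >> 31) is zero iff the value already
 * fits in 32 bits, in which case the low half is returned; otherwise the
 * result is forced to 0x7fffffff or 0x80000000 according to the sign of
 * the high half. */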
#define av_clipl_int32 av_clipl_int32_arm
static av_always_inline av_const int32_t av_clipl_int32_arm(int64_t a)
{
    int x, y;
    __asm__ ("adds   %1, %R2, %Q2, lsr #31  \n\t"
             "itet   ne                     \n\t"
             "mvnne  %1, #1<<31             \n\t"
             "moveq  %0, %Q2                \n\t"
             "eorne  %0, %1,  %R2, asr #31  \n\t"
             : "=r"(x), "=&r"(y) : "r"(a) : "cc");
    return x;
}

#endif /* HAVE_ASM_MOD_Q */

#endif /* HAVE_INLINE_ASM */

#endif /* AVUTIL_ARM_INTMATH_H */