/***
  This file is part of PulseAudio.

  Copyright 2004-2006 Lennart Poettering
  Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB

  PulseAudio is free software; you can redistribute it and/or modify
  it under the terms of the GNU Lesser General Public License as published
  by the Free Software Foundation; either version 2.1 of the License,
  or (at your option) any later version.

  PulseAudio is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  General Public License for more details.

  You should have received a copy of the GNU Lesser General Public License
  along with PulseAudio; if not, see <http://www.gnu.org/licenses/>.
***/

#ifdef HAVE_CONFIG_H
#include <config.h>
#endif

#include <stdio.h>
#include <stdlib.h>

#include <pulsecore/macro.h>
#include <pulsecore/endianmacros.h>

#include "cpu-x86.h"
#include "sconv.h"

#if (!defined(__APPLE__) && !defined(__FreeBSD__) && !defined(__FreeBSD_kernel__) && defined (__i386__)) || defined (__amd64__)

static const PA_DECLARE_ALIGNED (16, float, scale[4]) = { 0x8000, 0x8000, 0x8000, 0x8000 };

static void pa_sconv_s16le_from_f32ne_sse(unsigned n, const float *a, int16_t *b) {
    pa_reg_x86 temp, i;

    __asm__ __volatile__ (
        " movaps %5, %%xmm5             \n\t"
        " xor %0, %0                    \n\t"

        " mov %4, %1                    \n\t"
        " sar $3, %1                    \n\t" /* 8 floats at a time */
        " cmp $0, %1                    \n\t"
        " je 2f                         \n\t"

        "1:                             \n\t"
        " movups (%q2, %0, 2), %%xmm0   \n\t" /* read 8 floats */
        " movups 16(%q2, %0, 2), %%xmm2 \n\t"
        " mulps %%xmm5, %%xmm0          \n\t" /* *= 0x8000 */
        " mulps %%xmm5, %%xmm2          \n\t"

        " cvtps2pi %%xmm0, %%mm0        \n\t" /* low part to int */
        " cvtps2pi %%xmm2, %%mm2        \n\t"
        " movhlps %%xmm0, %%xmm0        \n\t" /* bring high part in position */
        " movhlps %%xmm2, %%xmm2        \n\t"
        " cvtps2pi %%xmm0, %%mm1        \n\t" /* high part to int */
        " cvtps2pi %%xmm2, %%mm3        \n\t"

        " packssdw %%mm1, %%mm0         \n\t" /* pack parts */
        " packssdw %%mm3, %%mm2         \n\t"
        " movq %%mm0, (%q3, %0)         \n\t"
        " movq %%mm2, 8(%q3, %0)        \n\t"

        " add $16, %0                   \n\t"
        " dec %1                        \n\t"
        " jne 1b                        \n\t"

        "2:                             \n\t"
        " mov %4, %1                    \n\t" /* prepare for leftovers */
        " and $7, %1                    \n\t"
        " je 5f                         \n\t"

        "3:                             \n\t"
        " movss (%q2, %0, 2), %%xmm0    \n\t"
        " mulss %%xmm5, %%xmm0          \n\t"
        " cvtss2si %%xmm0, %4           \n\t"
        " add $0x8000, %4               \n\t" /* check for saturation */
        " and $~0xffff, %4              \n\t"
        " cvtss2si %%xmm0, %4           \n\t"
        " je 4f                         \n\t"
        " sar $31, %4                   \n\t"
        " xor $0x7fff, %4               \n\t"

        "4:                             \n\t"
        " movw %w4, (%q3, %0)           \n\t" /* store leftover */
        " add $2, %0                    \n\t"
        " dec %1                        \n\t"
        " jne 3b                        \n\t"

        "5:                             \n\t"
        " emms                          \n\t"

        : "=&r" (i), "=&r" (temp)
        : "r" (a), "r" (b), "r" ((pa_reg_x86) n), "m" (*scale)
        : "cc", "memory"
    );
}

static void pa_sconv_s16le_from_f32ne_sse2(unsigned n, const float *a, int16_t *b) {
    pa_reg_x86 temp, i;

    __asm__ __volatile__ (
        " movaps %5, %%xmm5             \n\t"
        " xor %0, %0                    \n\t"

        " mov %4, %1                    \n\t"
        " sar $3, %1                    \n\t" /* 8 floats at a time */
        " cmp $0, %1                    \n\t"
        " je 2f                         \n\t"

        "1:                             \n\t"
        " movups (%q2, %0, 2), %%xmm0   \n\t" /* read 8 floats */
        " movups 16(%q2, %0, 2), %%xmm2 \n\t"
        " mulps %%xmm5, %%xmm0          \n\t" /* *= 0x8000 */
        " mulps %%xmm5, %%xmm2          \n\t"

        " cvtps2dq %%xmm0, %%xmm0       \n\t"
        " cvtps2dq %%xmm2, %%xmm2       \n\t"

        " packssdw %%xmm2, %%xmm0       \n\t"
        " movdqu %%xmm0, (%q3, %0)      \n\t"

        " add $16, %0                   \n\t"
        " dec %1                        \n\t"
        " jne 1b                        \n\t"

        "2:                             \n\t"
        " mov %4, %1                    \n\t" /* prepare for leftovers */
        " and $7, %1                    \n\t"
        " je 5f                         \n\t"

        "3:                             \n\t"
        " movss (%q2, %0, 2), %%xmm0    \n\t"
        " mulss %%xmm5, %%xmm0          \n\t"
        " cvtss2si %%xmm0, %4           \n\t"
        " add $0x8000, %4               \n\t"
        " and $~0xffff, %4              \n\t" /* check for saturation */
        " cvtss2si %%xmm0, %4           \n\t"
        " je 4f                         \n\t"
        " sar $31, %4                   \n\t"
        " xor $0x7fff, %4               \n\t"

        "4:                             \n\t"
        " movw %w4, (%q3, %0)           \n\t" /* store leftover */
        " add $2, %0                    \n\t"
        " dec %1                        \n\t"
        " jne 3b                        \n\t"

        "5:                             \n\t"

        : "=&r" (i), "=&r" (temp)
        : "r" (a), "r" (b), "r" ((pa_reg_x86) n), "m" (*scale)
        : "cc", "memory"
    );
}

#endif /* defined (__i386__) || defined (__amd64__) */

void pa_convert_func_init_sse(pa_cpu_x86_flag_t flags) {
#if (!defined(__APPLE__) && !defined(__FreeBSD__) && !defined(__FreeBSD_kernel__) && defined (__i386__)) || defined (__amd64__)

    if (flags & PA_CPU_X86_SSE2) {
        pa_log_info("Initialising SSE2 optimized conversions.");
        pa_set_convert_from_float32ne_function(PA_SAMPLE_S16LE, (pa_convert_func_t) pa_sconv_s16le_from_f32ne_sse2);
        pa_set_convert_to_s16ne_function(PA_SAMPLE_FLOAT32LE, (pa_convert_func_t) pa_sconv_s16le_from_f32ne_sse2);
    } else if (flags & PA_CPU_X86_SSE) {
        pa_log_info("Initialising SSE optimized conversions.");
        pa_set_convert_from_float32ne_function(PA_SAMPLE_S16LE, (pa_convert_func_t) pa_sconv_s16le_from_f32ne_sse);
        pa_set_convert_to_s16ne_function(PA_SAMPLE_FLOAT32LE, (pa_convert_func_t) pa_sconv_s16le_from_f32ne_sse);
    }

#endif /* defined (__i386__) || defined (__amd64__) */
}
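
/*
 * Illustrative sketch only, not part of the original file: a plain C view of
 * the conversion the SSE/SSE2 paths above implement -- scale each float32
 * sample by 0x8000, round to an integer, and saturate to the int16_t range.
 * It is guarded with "#if 0" so it is never compiled; the function name is
 * hypothetical, and lrintf() (round-to-nearest) only approximates the
 * rounding behaviour of cvtps2dq/cvtss2si under the default MXCSR mode.
 */
#if 0
#include <math.h>
#include <stdint.h>

static void sconv_s16le_from_f32ne_ref(unsigned n, const float *a, int16_t *b) {
    unsigned i;

    for (i = 0; i < n; i++) {
        /* Scale to the 16-bit range and round, mirroring the
         * "mulps %%xmm5" / cvt* steps in the assembly above. */
        long v = lrintf(a[i] * 0x8000);

        /* Saturate, as packssdw does in the SIMD path and the
         * add/and/sar/xor sequence does in the leftover path. */
        if (v < -0x8000)
            v = -0x8000;
        else if (v > 0x7fff)
            v = 0x7fff;

        b[i] = (int16_t) v; /* native store is little-endian on x86 */
    }
}
#endif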