/*
 * Copyright (c) 2011 Apple Inc. All rights reserved.
 * Copyright (C) 2012-2014 Erik de Castro Lopo <erikd@mega-nerd.com>
 *
 * @APPLE_APACHE_LICENSE_HEADER_START@
 *
 * Licensed under the Apache License, Version 2.0 (the "License") ;
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * @APPLE_APACHE_LICENSE_HEADER_END@
 */

/*
	File:		matrix_enc.c

	Contains:	ALAC mixing/matrixing encode routines.

	Copyright:	(c) 2004-2011 Apple, Inc.
*/

#include "matrixlib.h"
#include "ALACAudioTypes.h"

/*
    There is no plain middle-side option ; instead there are various mixing
    modes including middle-side, each lossless, as embodied in the mix ()
    and unmix () functions.  These functions exploit a generalized middle-side
    transformation:

    u := [(rL + (m-r)R)/m] ;
    v := L - R ;

    where [ ] denotes integer floor.  The (lossless) inverse is

    L = u + v - [rv/m] ;
    R = L - v ;
*/
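
/*
    For example, with m = 4 and r = 1, inputs L = 7 and R = 3 give

    u = [(1*7 + 3*3)/4] = [16/4] = 4 ;
    v = 7 - 3 = 4 ;

    and the inverse recovers L = 4 + 4 - [1*4/4] = 7 and R = 7 - 4 = 3,
    so the round trip is exact despite the integer floor.
*/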

// 16-bit routines

void
mix16 (const int32_t * in, uint32_t stride, int32_t * u, int32_t * v, int32_t numSamples, int32_t mixbits, int32_t mixres)
{
	int32_t		j ;

	if (mixres != 0)
	{
		int32_t		mod = 1 << mixbits ;
		int32_t		m2 ;

		/* matrixed stereo */
		m2 = mod - mixres ;
		for (j = 0 ; j < numSamples ; j++)
		{
			int32_t		l, r ;

			l = in [0] >> 16 ;
			r = in [1] >> 16 ;
			in += stride ;
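			/* u := [(rL + (m-r)R)/m] and v := L - R from the header comment, with r = mixres and m = 1 << mixbits */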
			u [j] = (mixres * l + m2 * r) >> mixbits ;
			v [j] = l - r ;
		}
	}
	else
	{
		/* Conventional separated stereo. */
		for (j = 0 ; j < numSamples ; j++)
		{
			u [j] = in [0] >> 16 ;
			v [j] = in [1] >> 16 ;
			in += stride ;
		}
	}
}

// 20-bit routines
// - the 20 bits of data are left-justified in 3 bytes of storage but right-aligned for input/output predictor buffers
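// - e.g. a sample whose 20 significant bits sit in bits 31..12 of the 32-bit input word is right-aligned
//   into bits 19..0 by the >> 12 below (relying on the usual arithmetic right shift of signed values)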

void
mix20 (const int32_t * in, uint32_t stride, int32_t * u, int32_t * v, int32_t numSamples, int32_t mixbits, int32_t mixres)
{
	int32_t		l, r ;
	int32_t		j ;

	if (mixres != 0)
	{
		/* matrixed stereo */
		int32_t		mod = 1 << mixbits ;
		int32_t		m2 = mod - mixres ;

		for (j = 0 ; j < numSamples ; j++)
		{
			l = in [0] >> 12 ;
			r = in [1] >> 12 ;
			in += stride ;

			u [j] = (mixres * l + m2 * r) >> mixbits ;
			v [j] = l - r ;
		}
	}
	else
	{
		/* Conventional separated stereo. */
		for (j = 0 ; j < numSamples ; j++)
		{
			u [j] = in [0] >> 12 ;
			v [j] = in [1] >> 12 ;
			in += stride ;
		}
	}
}

// 24-bit routines
// - the 24 bits of data are right-justified in the input/output predictor buffers
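// - the >> 8 below right-aligns samples that arrive in the top 24 bits of each 32-bit input word
// - e.g. with bytesShifted = 1 (shift = 8, mask = 0xff) a right-justified sample 0x123456 stores its
//   low byte 0x56 in shiftUV and passes 0x1234 on to the u/v mix, so the split is exactly reversible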

void
mix24 (const int32_t * in, uint32_t stride, int32_t * u, int32_t * v, int32_t numSamples,
			int32_t mixbits, int32_t mixres, uint16_t * shiftUV, int32_t bytesShifted)
{
	int32_t		l, r ;
	int32_t		shift = bytesShifted * 8 ;
	uint32_t	mask = (1ul << shift) - 1 ;
	int32_t		j, k ;

	if (mixres != 0)
	{
		/* matrixed stereo */
		int32_t		mod = 1 << mixbits ;
		int32_t		m2 = mod - mixres ;

		if (bytesShifted != 0)
		{
			for (j = 0, k = 0 ; j < numSamples ; j++, k += 2)
			{
				l = in [0] >> 8 ;
				r = in [1] >> 8 ;
				in += stride ;

				shiftUV [k + 0] = (uint16_t) (l & mask) ;
				shiftUV [k + 1] = (uint16_t) (r & mask) ;

				l >>= shift ;
				r >>= shift ;

				u [j] = (mixres * l + m2 * r) >> mixbits ;
				v [j] = l - r ;
			}
		}
		else
		{
			for (j = 0 ; j < numSamples ; j++)
			{
				l = in [0] >> 8 ;
				r = in [1] >> 8 ;
				in += stride ;

				u [j] = (mixres * l + m2 * r) >> mixbits ;
				v [j] = l - r ;
			}
		}
	}
	else
	{
		/* Conventional separated stereo. */
		if (bytesShifted != 0)
		{
			for (j = 0, k = 0 ; j < numSamples ; j++, k += 2)
			{
				l = in [0] >> 8 ;
				r = in [1] >> 8 ;
				in += stride ;

				shiftUV [k + 0] = (uint16_t) (l & mask) ;
				shiftUV [k + 1] = (uint16_t) (r & mask) ;

				l >>= shift ;
				r >>= shift ;

				u [j] = l ;
				v [j] = r ;
			}
		}
		else
		{
			for (j = 0 ; j < numSamples ; j++)
			{
				l = in [0] >> 8 ;
				r = in [1] >> 8 ;
				in += stride ;

				u [j] = l ;
				v [j] = r ;
			}
		}
	}
}

// 32-bit routines
// - note that these really expect the internal data width to be < 32 but the arrays are 32-bit
// - otherwise, the calculations might overflow into the 33rd bit and be lost
// - therefore, these routines deal with the specified "unused lower" bytes in the "shift" buffers
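// - e.g. with bytesShifted = 2 (shift = 16, mask = 0xffff) a sample 0x12345678 parks its low word 0x5678
//   in shiftUV and mixes only the high word 0x1234, keeping mixres * l + m2 * r within 32 bits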

void
mix32 (const int32_t * in, uint32_t stride, int32_t * u, int32_t * v, int32_t numSamples,
			int32_t mixbits, int32_t mixres, uint16_t * shiftUV, int32_t bytesShifted)
{
	int32_t		shift = bytesShifted * 8 ;
	uint32_t	mask = (1ul << shift) - 1 ;
	int32_t		l, r ;
	int32_t		j, k ;

	if (mixres != 0)
	{
		int32_t		mod = 1 << mixbits ;
		int32_t		m2 ;

		//Assert (bytesShifted != 0) ;

		/* matrixed stereo with shift */
		m2 = mod - mixres ;
		for (j = 0, k = 0 ; j < numSamples ; j++, k += 2)
		{
			l = in [0] ;
			r = in [1] ;
			in += stride ;

			shiftUV [k + 0] = (uint16_t) (l & mask) ;
			shiftUV [k + 1] = (uint16_t) (r & mask) ;

			l >>= shift ;
			r >>= shift ;

			u [j] = (mixres * l + m2 * r) >> mixbits ;
			v [j] = l - r ;
		}
	}
	else
	{
		if (bytesShifted == 0)
		{
			/* de-interleaving w/o shift */
			for (j = 0 ; j < numSamples ; j++)
			{
				u [j] = in [0] ;
				v [j] = in [1] ;
				in += stride ;
			}
		}
		else
		{
			/* de-interleaving with shift */
			for (j = 0, k = 0 ; j < numSamples ; j++, k += 2)
			{
				l = in [0] ;
				r = in [1] ;
				in += stride ;

				shiftUV [k + 0] = (uint16_t) (l & mask) ;
				shiftUV [k + 1] = (uint16_t) (r & mask) ;

				l >>= shift ;
				r >>= shift ;

				u [j] = l ;
				v [j] = r ;
			}
		}
	}
}

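/*
    A minimal usage sketch (the buffer names and kFrameSize below are
    hypothetical, not part of this file) : to mix one frame of interleaved
    stereo held as left-justified 16-bit samples in 32-bit words,

    int32_t	u [kFrameSize], v [kFrameSize] ;
    mix16 (interleaved, 2, u, v, kFrameSize, mixbits, mixres) ;

    where the stride of 2 steps over left/right pairs and mixres == 0
    selects plain channel separation instead of the matrixed transform.
*/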