/*
 * Copyright (c) 2001-2003 The FFmpeg project
 *
 * first version by Francois Revol (revol@free.fr)
 * fringe ADPCM codecs (e.g., DK3, DK4, Westwood)
 *   by Mike Melanson (melanson@pcisys.net)
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "config_components.h"

#include "libavutil/opt.h"

#include "avcodec.h"
#include "put_bits.h"
#include "bytestream.h"
#include "adpcm.h"
#include "adpcm_data.h"
#include "codec_internal.h"
#include "encode.h"

/**
 * @file
 * ADPCM encoders
 * See ADPCM decoder reference documents for codec information.
 */

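/*
 * The CASE_* helpers below expand a switch case only when the corresponding
 * encoder was enabled at configure time: CASE(FOO, ...) goes through
 * CONFIG_FOO_ENCODER and resolves to either CASE_1, which emits
 * "case AV_CODEC_ID_FOO: { ... } break;", or CASE_0, which emits nothing,
 * so disabled encoders contribute no code to the switch statements.
 */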
#define CASE_0(codec_id, ...)
#define CASE_1(codec_id, ...) \
    case codec_id:            \
    { __VA_ARGS__ }           \
    break;
#define CASE_2(enabled, codec_id, ...) \
        CASE_ ## enabled(codec_id, __VA_ARGS__)
#define CASE_3(config, codec_id, ...) \
        CASE_2(config, codec_id, __VA_ARGS__)
#define CASE(codec, ...) \
        CASE_3(CONFIG_ ## codec ## _ENCODER, AV_CODEC_ID_ ## codec, __VA_ARGS__)

typedef struct TrellisPath {
    int nibble;
    int prev;
} TrellisPath;

typedef struct TrellisNode {
    uint32_t ssd;
    int path;
    int sample1;
    int sample2;
    int step;
} TrellisNode;

typedef struct ADPCMEncodeContext {
    AVClass *class;
    int block_size;

    ADPCMChannelStatus status[6];
    TrellisPath *paths;
    TrellisNode *node_buf;
    TrellisNode **nodep_buf;
    uint8_t *trellis_hash;
} ADPCMEncodeContext;

#define FREEZE_INTERVAL 128

static av_cold int adpcm_encode_init(AVCodecContext *avctx)
{
    ADPCMEncodeContext *s = avctx->priv_data;
    int channels = avctx->ch_layout.nb_channels;

    /*
     * AMV's block size has to match that of the corresponding video
     * stream. Relax the POT requirement.
     */
    if (avctx->codec->id != AV_CODEC_ID_ADPCM_IMA_AMV &&
        (s->block_size & (s->block_size - 1))) {
        av_log(avctx, AV_LOG_ERROR, "block size must be power of 2\n");
        return AVERROR(EINVAL);
    }

    if (avctx->trellis) {
        int frontier, max_paths;

        if ((unsigned)avctx->trellis > 16U) {
            av_log(avctx, AV_LOG_ERROR, "invalid trellis size\n");
            return AVERROR(EINVAL);
        }

        if (avctx->codec->id == AV_CODEC_ID_ADPCM_IMA_SSI ||
            avctx->codec->id == AV_CODEC_ID_ADPCM_IMA_APM ||
            avctx->codec->id == AV_CODEC_ID_ADPCM_ARGO    ||
            avctx->codec->id == AV_CODEC_ID_ADPCM_IMA_WS) {
            /*
             * The current trellis implementation doesn't work for extended
             * runs of samples without periodic resets. Disallow it.
             */
            av_log(avctx, AV_LOG_ERROR, "trellis not supported\n");
            return AVERROR_PATCHWELCOME;
        }

        frontier  = 1 << avctx->trellis;
        max_paths = frontier * FREEZE_INTERVAL;
        if (!FF_ALLOC_TYPED_ARRAY(s->paths,        max_paths)    ||
            !FF_ALLOC_TYPED_ARRAY(s->node_buf,     2 * frontier) ||
            !FF_ALLOC_TYPED_ARRAY(s->nodep_buf,    2 * frontier) ||
            !FF_ALLOC_TYPED_ARRAY(s->trellis_hash, 65536))
            return AVERROR(ENOMEM);
    }

    avctx->bits_per_coded_sample = av_get_bits_per_sample(avctx->codec->id);

    switch (avctx->codec->id) {
    CASE(ADPCM_IMA_WAV,
        /* each 16-bit sample gives one nibble
           and we have 4 bytes per channel overhead */
        avctx->frame_size = (s->block_size - 4 * channels) * 8 /
                            (4 * channels) + 1;
        /* seems frame_size isn't taken into account...
           have to buffer the samples :-( */
        avctx->block_align = s->block_size;
        avctx->bits_per_coded_sample = 4;
        ) /* End of CASE */
    CASE(ADPCM_IMA_QT,
        avctx->frame_size  = 64;
        avctx->block_align = 34 * channels;
        ) /* End of CASE */
    CASE(ADPCM_MS,
        uint8_t *extradata;
        /* each 16-bit sample gives one nibble
           and we have 7 bytes per channel overhead */
        avctx->frame_size = (s->block_size - 7 * channels) * 2 / channels + 2;
        avctx->bits_per_coded_sample = 4;
        avctx->block_align = s->block_size;
        if (!(avctx->extradata = av_malloc(32 + AV_INPUT_BUFFER_PADDING_SIZE)))
            return AVERROR(ENOMEM);
        avctx->extradata_size = 32;
        extradata = avctx->extradata;
        bytestream_put_le16(&extradata, avctx->frame_size);
        bytestream_put_le16(&extradata, 7); /* wNumCoef */
        for (int i = 0; i < 7; i++) {
            bytestream_put_le16(&extradata, ff_adpcm_AdaptCoeff1[i] * 4);
            bytestream_put_le16(&extradata, ff_adpcm_AdaptCoeff2[i] * 4);
        }
        ) /* End of CASE */
    CASE(ADPCM_YAMAHA,
        avctx->frame_size  = s->block_size * 2 / channels;
        avctx->block_align = s->block_size;
        ) /* End of CASE */
    CASE(ADPCM_SWF,
        if (avctx->sample_rate != 11025 &&
            avctx->sample_rate != 22050 &&
            avctx->sample_rate != 44100) {
            av_log(avctx, AV_LOG_ERROR, "Sample rate must be 11025, "
                   "22050 or 44100\n");
            return AVERROR(EINVAL);
        }
        avctx->frame_size  = 4096; /* Hardcoded according to the SWF spec. */
        avctx->block_align = (2 + channels * (22 + 4 * (avctx->frame_size - 1)) + 7) / 8;
        ) /* End of CASE */
    case AV_CODEC_ID_ADPCM_IMA_SSI:
    case AV_CODEC_ID_ADPCM_IMA_ALP:
        avctx->frame_size  = s->block_size * 2 / channels;
        avctx->block_align = s->block_size;
        break;
    CASE(ADPCM_IMA_AMV,
        if (avctx->sample_rate != 22050) {
            av_log(avctx, AV_LOG_ERROR, "Sample rate must be 22050\n");
            return AVERROR(EINVAL);
        }

        if (channels != 1) {
            av_log(avctx, AV_LOG_ERROR, "Only mono is supported\n");
            return AVERROR(EINVAL);
        }

        avctx->frame_size  = s->block_size;
        avctx->block_align = 8 + (FFALIGN(avctx->frame_size, 2) / 2);
        ) /* End of CASE */
    CASE(ADPCM_IMA_APM,
        avctx->frame_size  = s->block_size * 2 / channels;
        avctx->block_align = s->block_size;

        if (!(avctx->extradata = av_mallocz(28 + AV_INPUT_BUFFER_PADDING_SIZE)))
            return AVERROR(ENOMEM);
        avctx->extradata_size = 28;
        ) /* End of CASE */
    CASE(ADPCM_ARGO,
        avctx->frame_size  = 32;
        avctx->block_align = 17 * channels;
        ) /* End of CASE */
    CASE(ADPCM_IMA_WS,
        /* each 16-bit sample gives one nibble */
        avctx->frame_size  = s->block_size * 2 / channels;
        avctx->block_align = s->block_size;
        ) /* End of CASE */
    default:
        return AVERROR(EINVAL);
    }

    return 0;
}

static av_cold int adpcm_encode_close(AVCodecContext *avctx)
{
    ADPCMEncodeContext *s = avctx->priv_data;
    av_freep(&s->paths);
    av_freep(&s->node_buf);
    av_freep(&s->nodep_buf);
    av_freep(&s->trellis_hash);

    return 0;
}

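/*
 * The IMA-family compressors below map the prediction error to a sign bit
 * plus a 3-bit magnitude scaled by the current step size; the predictor is
 * updated with the same reconstruction the decoder will compute, and the
 * step index is adapted through ff_adpcm_index_table.
 */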
static inline uint8_t adpcm_ima_compress_sample(ADPCMChannelStatus *c,
                                                int16_t sample)
{
    int delta  = sample - c->prev_sample;
    int nibble = FFMIN(7, abs(delta) * 4 /
                       ff_adpcm_step_table[c->step_index]) + (delta < 0) * 8;
    c->prev_sample += ((ff_adpcm_step_table[c->step_index] *
                        ff_adpcm_yamaha_difflookup[nibble]) / 8);
    c->prev_sample  = av_clip_int16(c->prev_sample);
    c->step_index   = av_clip(c->step_index + ff_adpcm_index_table[nibble], 0, 88);
    return nibble;
}

static inline uint8_t adpcm_ima_alp_compress_sample(ADPCMChannelStatus *c, int16_t sample)
{
    const int delta = sample - c->prev_sample;
    const int step  = ff_adpcm_step_table[c->step_index];
    const int sign  = (delta < 0) * 8;

    int nibble = FFMIN(abs(delta) * 4 / step, 7);
    int diff   = (step * nibble) >> 2;
    if (sign)
        diff = -diff;

    nibble = sign | nibble;

    c->prev_sample += diff;
    c->prev_sample  = av_clip_int16(c->prev_sample);
    c->step_index   = av_clip(c->step_index + ff_adpcm_index_table[nibble], 0, 88);
    return nibble;
}

static inline uint8_t adpcm_ima_qt_compress_sample(ADPCMChannelStatus *c,
                                                   int16_t sample)
{
    int delta = sample - c->prev_sample;
    int diff, step = ff_adpcm_step_table[c->step_index];
    int nibble = 8 * (delta < 0);

    delta = abs(delta);
    diff  = delta + (step >> 3);

    if (delta >= step) {
        nibble |= 4;
        delta  -= step;
    }
    step >>= 1;
    if (delta >= step) {
        nibble |= 2;
        delta  -= step;
    }
    step >>= 1;
    if (delta >= step) {
        nibble |= 1;
        delta  -= step;
    }
    diff -= delta;

    if (nibble & 8)
        c->prev_sample -= diff;
    else
        c->prev_sample += diff;

    c->prev_sample = av_clip_int16(c->prev_sample);
    c->step_index  = av_clip(c->step_index + ff_adpcm_index_table[nibble], 0, 88);

    return nibble;
}

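/*
 * MS ADPCM: the predictor is a two-tap filter over the last two
 * reconstructed samples (coeff1/coeff2 are in 1/64 units), the prediction
 * error is quantized to a signed 4-bit multiple of idelta, and idelta is
 * rescaled through ff_adpcm_AdaptationTable with a floor of 16.
 */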
static inline uint8_t adpcm_ms_compress_sample(ADPCMChannelStatus *c,
                                               int16_t sample)
{
    int predictor, nibble, bias;

    predictor = (((c->sample1) * (c->coeff1)) +
                 (( c->sample2) * (c->coeff2))) / 64;

    nibble = sample - predictor;
    if (nibble >= 0)
        bias =  c->idelta / 2;
    else
        bias = -c->idelta / 2;

    nibble = (nibble + bias) / c->idelta;
    nibble = av_clip_intp2(nibble, 3) & 0x0F;

    predictor += ((nibble & 0x08) ? (nibble - 0x10) : nibble) * c->idelta;

    c->sample2 = c->sample1;
    c->sample1 = av_clip_int16(predictor);

    c->idelta = (ff_adpcm_AdaptationTable[nibble] * c->idelta) >> 8;
    if (c->idelta < 16)
        c->idelta = 16;

    return nibble;
}

static inline uint8_t adpcm_yamaha_compress_sample(ADPCMChannelStatus *c,
                                                   int16_t sample)
{
    int nibble, delta;

    if (!c->step) {
        c->predictor = 0;
        c->step      = 127;
    }

    delta = sample - c->predictor;

    nibble = FFMIN(7, abs(delta) * 4 / c->step) + (delta < 0) * 8;

    c->predictor += ((c->step * ff_adpcm_yamaha_difflookup[nibble]) / 8);
    c->predictor  = av_clip_int16(c->predictor);
    c->step       = (c->step * ff_adpcm_yamaha_indexscale[nibble]) >> 8;
    c->step       = av_clip(c->step, 127, 24576);

    return nibble;
}

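/*
 * Trellis quantization: instead of the greedy per-sample decisions above,
 * keep a frontier of (1 << avctx->trellis) candidate decoder states ranked
 * by accumulated squared error (ssd) and extend each one with the nibbles
 * closest to the ideal quantized value. Every FREEZE_INTERVAL samples the
 * best surviving path is committed to dst and the path buffer is recycled.
 */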
static void adpcm_compress_trellis(AVCodecContext *avctx,
                                   const int16_t *samples, uint8_t *dst,
                                   ADPCMChannelStatus *c, int n, int stride)
{
    //FIXME 6% faster if frontier is a compile-time constant
    ADPCMEncodeContext *s = avctx->priv_data;
    const int frontier = 1 << avctx->trellis;
    const int version  = avctx->codec->id;
    TrellisPath *paths       = s->paths, *p;
    TrellisNode *node_buf    = s->node_buf;
    TrellisNode **nodep_buf  = s->nodep_buf;
    TrellisNode **nodes      = nodep_buf; // nodes[] is always sorted by .ssd
    TrellisNode **nodes_next = nodep_buf + frontier;
    int pathn = 0, froze = -1, i, j, k, generation = 0;
    uint8_t *hash = s->trellis_hash;
    memset(hash, 0xff, 65536 * sizeof(*hash));

    memset(nodep_buf, 0, 2 * frontier * sizeof(*nodep_buf));
    nodes[0]          = node_buf + frontier;
    nodes[0]->ssd     = 0;
    nodes[0]->path    = 0;
    nodes[0]->step    = c->step_index;
    nodes[0]->sample1 = c->sample1;
    nodes[0]->sample2 = c->sample2;
    if (version == AV_CODEC_ID_ADPCM_IMA_WAV ||
        version == AV_CODEC_ID_ADPCM_IMA_QT  ||
        version == AV_CODEC_ID_ADPCM_IMA_AMV ||
        version == AV_CODEC_ID_ADPCM_SWF)
        nodes[0]->sample1 = c->prev_sample;
    if (version == AV_CODEC_ID_ADPCM_MS)
        nodes[0]->step = c->idelta;
    if (version == AV_CODEC_ID_ADPCM_YAMAHA) {
        if (c->step == 0) {
            nodes[0]->step    = 127;
            nodes[0]->sample1 = 0;
        } else {
            nodes[0]->step    = c->step;
            nodes[0]->sample1 = c->predictor;
        }
    }

    for (i = 0; i < n; i++) {
        TrellisNode *t = node_buf + frontier * (i & 1);
        TrellisNode **u;
        int sample   = samples[i * stride];
        int heap_pos = 0;
        memset(nodes_next, 0, frontier * sizeof(TrellisNode*));
        for (j = 0; j < frontier && nodes[j]; j++) {
            // higher j have higher ssd already, so they're likely
            // to yield a suboptimal next sample too
            const int range = (j < frontier / 2) ? 1 : 0;
            const int step  = nodes[j]->step;
            int nidx;
            if (version == AV_CODEC_ID_ADPCM_MS) {
                const int predictor = ((nodes[j]->sample1 * c->coeff1) +
                                       (nodes[j]->sample2 * c->coeff2)) / 64;
                const int div  = (sample - predictor) / step;
                const int nmin = av_clip(div - range, -8, 6);
                const int nmax = av_clip(div + range, -7, 7);
                for (nidx = nmin; nidx <= nmax; nidx++) {
                    const int nibble = nidx & 0xf;
                    int dec_sample   = predictor + nidx * step;
#define STORE_NODE(NAME, STEP_INDEX)\
    int d;\
    uint32_t ssd;\
    int pos;\
    TrellisNode *u;\
    uint8_t *h;\
    dec_sample = av_clip_int16(dec_sample);\
    d = sample - dec_sample;\
    ssd = nodes[j]->ssd + d*(unsigned)d;\
    /* Check for wraparound, skip such samples completely. \
     * Note, changing ssd to a 64 bit variable would be \
     * simpler, avoiding this check, but it's slower on \
     * x86 32 bit at the moment. */\
    if (ssd < nodes[j]->ssd)\
        goto next_##NAME;\
    /* Collapse any two states with the same previous sample value. \
     * One could also distinguish states by step and by 2nd to last
     * sample, but the effects of that are negligible.
     * Since nodes in the previous generation are iterated
     * through a heap, they're roughly ordered from better to
     * worse, but not strictly ordered. Therefore, an earlier
     * node with the same sample value is better in most cases
     * (and thus the current is skipped), but not strictly
     * in all cases. Only skipping samples where ssd >=
     * ssd of the earlier node with the same sample gives
     * slightly worse quality, though, for some reason. */ \
    h = &hash[(uint16_t) dec_sample];\
    if (*h == generation)\
        goto next_##NAME;\
    if (heap_pos < frontier) {\
        pos = heap_pos++;\
    } else {\
        /* Try to replace one of the leaf nodes with the new \
         * one, but try a different slot each time. */\
        pos = (frontier >> 1) +\
              (heap_pos & ((frontier >> 1) - 1));\
        if (ssd > nodes_next[pos]->ssd)\
            goto next_##NAME;\
        heap_pos++;\
    }\
    *h = generation;\
    u  = nodes_next[pos];\
    if (!u) {\
        av_assert1(pathn < FREEZE_INTERVAL << avctx->trellis);\
        u = t++;\
        nodes_next[pos] = u;\
        u->path = pathn++;\
    }\
    u->ssd  = ssd;\
    u->step = STEP_INDEX;\
    u->sample2 = nodes[j]->sample1;\
    u->sample1 = dec_sample;\
    paths[u->path].nibble = nibble;\
    paths[u->path].prev   = nodes[j]->path;\
    /* Sift the newly inserted node up in the heap to \
     * restore the heap property. */\
    while (pos > 0) {\
        int parent = (pos - 1) >> 1;\
        if (nodes_next[parent]->ssd <= ssd)\
            break;\
        FFSWAP(TrellisNode*, nodes_next[parent], nodes_next[pos]);\
        pos = parent;\
    }\
    next_##NAME:;
                    STORE_NODE(ms, FFMAX(16,
                               (ff_adpcm_AdaptationTable[nibble] * step) >> 8));
                }
            } else if (version == AV_CODEC_ID_ADPCM_IMA_WAV ||
                       version == AV_CODEC_ID_ADPCM_IMA_QT  ||
                       version == AV_CODEC_ID_ADPCM_IMA_AMV ||
                       version == AV_CODEC_ID_ADPCM_SWF) {
#define LOOP_NODES(NAME, STEP_TABLE, STEP_INDEX)\
                const int predictor = nodes[j]->sample1;\
                const int div = (sample - predictor) * 4 / STEP_TABLE;\
                int nmin = av_clip(div - range, -7, 6);\
                int nmax = av_clip(div + range, -6, 7);\
                if (nmin <= 0)\
                    nmin--; /* distinguish -0 from +0 */\
                if (nmax < 0)\
                    nmax--;\
                for (nidx = nmin; nidx <= nmax; nidx++) {\
                    const int nibble = nidx < 0 ? 7 - nidx : nidx;\
                    int dec_sample = predictor +\
                                    (STEP_TABLE *\
                                     ff_adpcm_yamaha_difflookup[nibble]) / 8;\
                    STORE_NODE(NAME, STEP_INDEX);\
                }
                LOOP_NODES(ima, ff_adpcm_step_table[step],
                           av_clip(step + ff_adpcm_index_table[nibble], 0, 88));
            } else { //AV_CODEC_ID_ADPCM_YAMAHA
                LOOP_NODES(yamaha, step,
                           av_clip((step * ff_adpcm_yamaha_indexscale[nibble]) >> 8,
                                   127, 24576));
#undef LOOP_NODES
#undef STORE_NODE
            }
        }

        u          = nodes;
        nodes      = nodes_next;
        nodes_next = u;

        generation++;
        if (generation == 255) {
            memset(hash, 0xff, 65536 * sizeof(*hash));
            generation = 0;
        }

        // prevent overflow
        if (nodes[0]->ssd > (1 << 28)) {
            for (j = 1; j < frontier && nodes[j]; j++)
                nodes[j]->ssd -= nodes[0]->ssd;
            nodes[0]->ssd = 0;
        }

        // merge old paths to save memory
        if (i == froze + FREEZE_INTERVAL) {
            p = &paths[nodes[0]->path];
            for (k = i; k > froze; k--) {
                dst[k] = p->nibble;
                p      = &paths[p->prev];
            }
            froze = i;
            pathn = 0;
            // other nodes might use paths that don't coincide with the frozen one.
            // checking which nodes do so is too slow, so just kill them all.
            // this also slightly improves quality, but I don't know why.
            memset(nodes + 1, 0, (frontier - 1) * sizeof(TrellisNode*));
        }
    }

    p = &paths[nodes[0]->path];
    for (i = n - 1; i > froze; i--) {
        dst[i] = p->nibble;
        p      = &paths[p->prev];
    }

    c->predictor  = nodes[0]->sample1;
    c->sample1    = nodes[0]->sample1;
    c->sample2    = nodes[0]->sample2;
    c->step_index = nodes[0]->step;
    c->step       = nodes[0]->step;
    c->idelta     = nodes[0]->step;
}

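/*
 * Argonaut Games ADPCM: the residual is taken against a first-order
 * (flag == 0) or second-order (flag == 1) predictor and truncated to a
 * 4-bit nibble after a per-block shift; adpcm_encode_frame() below tries
 * every shift/flag combination per block and keeps the one with the
 * smallest total absolute error.
 */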
#if CONFIG_ADPCM_ARGO_ENCODER
static inline int adpcm_argo_compress_nibble(const ADPCMChannelStatus *cs, int16_t s,
                                             int shift, int flag)
{
    int nibble;

    if (flag)
        nibble = 4 * s - 8 * cs->sample1 + 4 * cs->sample2;
    else
        nibble = 4 * s - 4 * cs->sample1;

    return (nibble >> shift) & 0x0F;
}

static int64_t adpcm_argo_compress_block(ADPCMChannelStatus *cs, PutBitContext *pb,
                                         const int16_t *samples, int nsamples,
                                         int shift, int flag)
{
    int64_t error = 0;

    if (pb) {
        put_bits(pb, 4, shift - 2);
        put_bits(pb, 1, 0);
        put_bits(pb, 1, !!flag);
        put_bits(pb, 2, 0);
    }

    for (int n = 0; n < nsamples; n++) {
        /* Compress the nibble, then expand it to see how much precision we've lost. */
        int nibble = adpcm_argo_compress_nibble(cs, samples[n], shift, flag);
        int16_t sample = ff_adpcm_argo_expand_nibble(cs, nibble, shift, flag);

        error += abs(samples[n] - sample);

        if (pb)
            put_bits(pb, 4, nibble);
    }

    return error;
}
#endif

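/*
 * One call encodes one block: the packet is sized from block_align (or
 * directly from nb_samples for the header-less IMA variants), any per-codec
 * block header is written first, then the 4-bit nibbles, which come from
 * the trellis search when avctx->trellis is set.
 */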
static int adpcm_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
                              const AVFrame *frame, int *got_packet_ptr)
{
    int st, pkt_size, ret;
    const int16_t *samples;
    int16_t **samples_p;
    uint8_t *dst;
    ADPCMEncodeContext *c = avctx->priv_data;
    int channels = avctx->ch_layout.nb_channels;

    samples   = (const int16_t *)frame->data[0];
    samples_p = (int16_t **)frame->extended_data;
    st        = channels == 2;

    if (avctx->codec_id == AV_CODEC_ID_ADPCM_IMA_SSI ||
        avctx->codec_id == AV_CODEC_ID_ADPCM_IMA_ALP ||
        avctx->codec_id == AV_CODEC_ID_ADPCM_IMA_APM ||
        avctx->codec_id == AV_CODEC_ID_ADPCM_IMA_WS)
        pkt_size = (frame->nb_samples * channels) / 2;
    else
        pkt_size = avctx->block_align;
    if ((ret = ff_get_encode_buffer(avctx, avpkt, pkt_size, 0)) < 0)
        return ret;
    dst = avpkt->data;

    switch(avctx->codec->id) {
    CASE(ADPCM_IMA_WAV,
        int blocks = (frame->nb_samples - 1) / 8;

        for (int ch = 0; ch < channels; ch++) {
            ADPCMChannelStatus *status = &c->status[ch];
            status->prev_sample = samples_p[ch][0];
            /* status->step_index = 0;
               XXX: not sure how to init the state machine */
            bytestream_put_le16(&dst, status->prev_sample);
            *dst++ = status->step_index;
            *dst++ = 0; /* unknown */
        }

        /* stereo: 4 bytes (8 samples) for left, 4 bytes for right */
        if (avctx->trellis > 0) {
            uint8_t *buf;
            if (!FF_ALLOC_TYPED_ARRAY(buf, channels * blocks * 8))
                return AVERROR(ENOMEM);
            for (int ch = 0; ch < channels; ch++) {
                adpcm_compress_trellis(avctx, &samples_p[ch][1],
                                       buf + ch * blocks * 8, &c->status[ch],
                                       blocks * 8, 1);
            }
            for (int i = 0; i < blocks; i++) {
                for (int ch = 0; ch < channels; ch++) {
                    uint8_t *buf1 = buf + ch * blocks * 8 + i * 8;
                    for (int j = 0; j < 8; j += 2)
                        *dst++ = buf1[j] | (buf1[j + 1] << 4);
                }
            }
            av_free(buf);
        } else {
            for (int i = 0; i < blocks; i++) {
                for (int ch = 0; ch < channels; ch++) {
                    ADPCMChannelStatus *status = &c->status[ch];
                    const int16_t *smp = &samples_p[ch][1 + i * 8];
                    for (int j = 0; j < 8; j += 2) {
                        uint8_t v = adpcm_ima_compress_sample(status, smp[j    ]);
                        v        |= adpcm_ima_compress_sample(status, smp[j + 1]) << 4;
                        *dst++ = v;
                    }
                }
            }
        }
        ) /* End of CASE */
    CASE(ADPCM_IMA_QT,
        PutBitContext pb;
        init_put_bits(&pb, dst, pkt_size);

        for (int ch = 0; ch < channels; ch++) {
            ADPCMChannelStatus *status = &c->status[ch];
            put_bits(&pb, 9, (status->prev_sample & 0xFFFF) >> 7);
            put_bits(&pb, 7,  status->step_index);
            if (avctx->trellis > 0) {
                uint8_t buf[64];
                adpcm_compress_trellis(avctx, &samples_p[ch][0], buf, status,
                                       64, 1);
                for (int i = 0; i < 64; i++)
                    put_bits(&pb, 4, buf[i ^ 1]);
                status->prev_sample = status->predictor;
            } else {
                for (int i = 0; i < 64; i += 2) {
                    int t1, t2;
                    t1 = adpcm_ima_qt_compress_sample(status, samples_p[ch][i    ]);
                    t2 = adpcm_ima_qt_compress_sample(status, samples_p[ch][i + 1]);
                    put_bits(&pb, 4, t2);
                    put_bits(&pb, 4, t1);
                }
            }
        }

        flush_put_bits(&pb);
        ) /* End of CASE */
    CASE(ADPCM_IMA_SSI,
        PutBitContext pb;
        init_put_bits(&pb, dst, pkt_size);

        av_assert0(avctx->trellis == 0);

        for (int i = 0; i < frame->nb_samples; i++) {
            for (int ch = 0; ch < channels; ch++) {
                put_bits(&pb, 4, adpcm_ima_qt_compress_sample(c->status + ch, *samples++));
            }
        }

        flush_put_bits(&pb);
        ) /* End of CASE */
    CASE(ADPCM_IMA_ALP,
        PutBitContext pb;
        init_put_bits(&pb, dst, pkt_size);

        av_assert0(avctx->trellis == 0);

        for (int n = frame->nb_samples / 2; n > 0; n--) {
            for (int ch = 0; ch < channels; ch++) {
                put_bits(&pb, 4, adpcm_ima_alp_compress_sample(c->status + ch, *samples++));
                put_bits(&pb, 4, adpcm_ima_alp_compress_sample(c->status + ch, samples[st]));
            }
            samples += channels;
        }

        flush_put_bits(&pb);
        ) /* End of CASE */
    CASE(ADPCM_SWF,
        const int n = frame->nb_samples - 1;
        PutBitContext pb;
        init_put_bits(&pb, dst, pkt_size);

        /* NB: This is safe as we don't have AV_CODEC_CAP_SMALL_LAST_FRAME. */
        av_assert0(n == 4095);

        // store AdpcmCodeSize
        put_bits(&pb, 2, 2);    // set 4-bit flash adpcm format

        // init the encoder state
        for (int i = 0; i < channels; i++) {
            // clip step so it fits 6 bits
            c->status[i].step_index = av_clip_uintp2(c->status[i].step_index, 6);
            put_sbits(&pb, 16, samples[i]);
            put_bits(&pb, 6, c->status[i].step_index);
            c->status[i].prev_sample = samples[i];
        }

        if (avctx->trellis > 0) {
            uint8_t buf[8190 /* = 2 * n */];
            adpcm_compress_trellis(avctx, samples + channels, buf,
                                   &c->status[0], n, channels);
            if (channels == 2)
                adpcm_compress_trellis(avctx, samples + channels + 1,
                                       buf + n, &c->status[1], n,
                                       channels);
            for (int i = 0; i < n; i++) {
                put_bits(&pb, 4, buf[i]);
                if (channels == 2)
                    put_bits(&pb, 4, buf[n + i]);
            }
        } else {
            for (int i = 1; i < frame->nb_samples; i++) {
                put_bits(&pb, 4, adpcm_ima_compress_sample(&c->status[0],
                         samples[channels * i]));
                if (channels == 2)
                    put_bits(&pb, 4, adpcm_ima_compress_sample(&c->status[1],
                             samples[2 * i + 1]));
            }
        }
        flush_put_bits(&pb);
        ) /* End of CASE */
    CASE(ADPCM_MS,
        for (int i = 0; i < channels; i++) {
            int predictor = 0;
            *dst++ = predictor;
            c->status[i].coeff1 = ff_adpcm_AdaptCoeff1[predictor];
            c->status[i].coeff2 = ff_adpcm_AdaptCoeff2[predictor];
        }
        for (int i = 0; i < channels; i++) {
            if (c->status[i].idelta < 16)
                c->status[i].idelta = 16;
            bytestream_put_le16(&dst, c->status[i].idelta);
        }
        for (int i = 0; i < channels; i++)
            c->status[i].sample2 = *samples++;
        for (int i = 0; i < channels; i++) {
            c->status[i].sample1 = *samples++;
            bytestream_put_le16(&dst, c->status[i].sample1);
        }
        for (int i = 0; i < channels; i++)
            bytestream_put_le16(&dst, c->status[i].sample2);

        if (avctx->trellis > 0) {
            const int n  = avctx->block_align - 7 * channels;
            uint8_t *buf = av_malloc(2 * n);
            if (!buf)
                return AVERROR(ENOMEM);
            if (channels == 1) {
                adpcm_compress_trellis(avctx, samples, buf, &c->status[0], n,
                                       channels);
                for (int i = 0; i < n; i += 2)
                    *dst++ = (buf[i] << 4) | buf[i + 1];
            } else {
                adpcm_compress_trellis(avctx, samples,     buf,
                                       &c->status[0], n, channels);
                adpcm_compress_trellis(avctx, samples + 1, buf + n,
                                       &c->status[1], n, channels);
                for (int i = 0; i < n; i++)
                    *dst++ = (buf[i] << 4) | buf[n + i];
            }
            av_free(buf);
        } else {
            for (int i = 7 * channels; i < avctx->block_align; i++) {
                int nibble;
                nibble  = adpcm_ms_compress_sample(&c->status[ 0], *samples++) << 4;
                nibble |= adpcm_ms_compress_sample(&c->status[st], *samples++);
                *dst++  = nibble;
            }
        }
        ) /* End of CASE */
    CASE(ADPCM_YAMAHA,
        int n = frame->nb_samples / 2;
        if (avctx->trellis > 0) {
            uint8_t *buf = av_malloc(2 * n * 2);
            if (!buf)
                return AVERROR(ENOMEM);
            n *= 2;
            if (channels == 1) {
                adpcm_compress_trellis(avctx, samples, buf, &c->status[0], n,
                                       channels);
                for (int i = 0; i < n; i += 2)
                    *dst++ = buf[i] | (buf[i + 1] << 4);
            } else {
                adpcm_compress_trellis(avctx, samples,     buf,
                                       &c->status[0], n, channels);
                adpcm_compress_trellis(avctx, samples + 1, buf + n,
                                       &c->status[1], n, channels);
                for (int i = 0; i < n; i++)
                    *dst++ = buf[i] | (buf[n + i] << 4);
            }
            av_free(buf);
        } else
            for (n *= channels; n > 0; n--) {
                int nibble;
                nibble  = adpcm_yamaha_compress_sample(&c->status[ 0], *samples++);
                nibble |= adpcm_yamaha_compress_sample(&c->status[st], *samples++) << 4;
                *dst++  = nibble;
            }
        ) /* End of CASE */
    CASE(ADPCM_IMA_APM,
        PutBitContext pb;
        init_put_bits(&pb, dst, pkt_size);

        av_assert0(avctx->trellis == 0);

        for (int n = frame->nb_samples / 2; n > 0; n--) {
            for (int ch = 0; ch < channels; ch++) {
                put_bits(&pb, 4, adpcm_ima_qt_compress_sample(c->status + ch, *samples++));
                put_bits(&pb, 4, adpcm_ima_qt_compress_sample(c->status + ch, samples[st]));
            }
            samples += channels;
        }

        flush_put_bits(&pb);
        ) /* End of CASE */
    CASE(ADPCM_IMA_AMV,
        av_assert0(channels == 1);

        c->status[0].prev_sample = *samples;
        bytestream_put_le16(&dst, c->status[0].prev_sample);
        bytestream_put_byte(&dst, c->status[0].step_index);
        bytestream_put_byte(&dst, 0);
        bytestream_put_le32(&dst, avctx->frame_size);

        if (avctx->trellis > 0) {
            const int n = frame->nb_samples >> 1;
            uint8_t *buf = av_malloc(2 * n);

            if (!buf)
                return AVERROR(ENOMEM);

            adpcm_compress_trellis(avctx, samples, buf, &c->status[0], 2 * n, channels);
            for (int i = 0; i < n; i++)
                bytestream_put_byte(&dst, (buf[2 * i] << 4) | buf[2 * i + 1]);

            samples += 2 * n;
            av_free(buf);
        } else for (int n = frame->nb_samples >> 1; n > 0; n--) {
            int nibble;
            nibble  = adpcm_ima_compress_sample(&c->status[0], *samples++) << 4;
            nibble |= adpcm_ima_compress_sample(&c->status[0], *samples++) & 0x0F;
            bytestream_put_byte(&dst, nibble);
        }

        if (avctx->frame_size & 1) {
            int nibble = adpcm_ima_compress_sample(&c->status[0], *samples++) << 4;
            bytestream_put_byte(&dst, nibble);
        }
        ) /* End of CASE */
    CASE(ADPCM_ARGO,
        PutBitContext pb;
        init_put_bits(&pb, dst, pkt_size);

        av_assert0(frame->nb_samples == 32);

        for (int ch = 0; ch < channels; ch++) {
            int64_t error  = INT64_MAX, tmperr = INT64_MAX;
            int     shift  = 2, flag = 0;
            int saved1 = c->status[ch].sample1;
            int saved2 = c->status[ch].sample2;

            /* Find the optimal coefficients, bail early if we find a perfect result. */
            for (int s = 2; s < 18 && tmperr != 0; s++) {
                for (int f = 0; f < 2 && tmperr != 0; f++) {
                    c->status[ch].sample1 = saved1;
                    c->status[ch].sample2 = saved2;
                    tmperr = adpcm_argo_compress_block(c->status + ch, NULL, samples_p[ch],
                                                       frame->nb_samples, s, f);
                    if (tmperr < error) {
                        shift = s;
                        flag  = f;
                        error = tmperr;
                    }
                }
            }

            /* Now actually do the encode. */
            c->status[ch].sample1 = saved1;
            c->status[ch].sample2 = saved2;
            adpcm_argo_compress_block(c->status + ch, &pb, samples_p[ch],
                                      frame->nb_samples, shift, flag);
        }

        flush_put_bits(&pb);
        ) /* End of CASE */
    CASE(ADPCM_IMA_WS,
        PutBitContext pb;
        init_put_bits(&pb, dst, pkt_size);

        av_assert0(avctx->trellis == 0);
        for (int n = frame->nb_samples / 2; n > 0; n--) {
            /* stereo: 1 byte (2 samples) for left, 1 byte for right */
            for (int ch = 0; ch < channels; ch++) {
                int t1, t2;
                t1 = adpcm_ima_compress_sample(&c->status[ch], *samples++);
                t2 = adpcm_ima_compress_sample(&c->status[ch], samples[st]);
                put_bits(&pb, 4, t2);
                put_bits(&pb, 4, t1);
            }
            samples += channels;
        }
        flush_put_bits(&pb);
        ) /* End of CASE */
    default:
        return AVERROR(EINVAL);
    }

    *got_packet_ptr = 1;
    return 0;
}

static const enum AVSampleFormat sample_fmts[] = {
    AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_NONE
};

static const enum AVSampleFormat sample_fmts_p[] = {
    AV_SAMPLE_FMT_S16P, AV_SAMPLE_FMT_NONE
};

static const AVChannelLayout ch_layouts[] = {
    AV_CHANNEL_LAYOUT_MONO,
    AV_CHANNEL_LAYOUT_STEREO,
    { 0 },
};

static const AVOption options[] = {
    {
        .name        = "block_size",
        .help        = "set the block size",
        .offset      = offsetof(ADPCMEncodeContext, block_size),
        .type        = AV_OPT_TYPE_INT,
        .default_val = {.i64 = 1024},
        .min         = 32,
        .max         = 8192, /* Is this a reasonable upper limit? */
        .flags       = AV_OPT_FLAG_ENCODING_PARAM | AV_OPT_FLAG_AUDIO_PARAM
    },
    { NULL }
};

static const AVClass adpcm_encoder_class = {
    .class_name = "ADPCM encoder",
    .item_name  = av_default_item_name,
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
};

#define ADPCM_ENCODER_0(id_, name_, sample_fmts_, capabilities_, long_name_)
#define ADPCM_ENCODER_1(id_, name_, sample_fmts_, capabilities_, long_name_) \
const FFCodec ff_ ## name_ ## _encoder = {                                   \
    .p.name         = #name_,                                                \
    .p.long_name    = NULL_IF_CONFIG_SMALL(long_name_),                      \
    .p.type         = AVMEDIA_TYPE_AUDIO,                                    \
    .p.id           = id_,                                                   \
    .p.sample_fmts  = sample_fmts_,                                          \
    .p.ch_layouts   = ch_layouts,                                            \
    .p.capabilities = capabilities_ | AV_CODEC_CAP_DR1,                      \
    .p.priv_class   = &adpcm_encoder_class,                                  \
    .priv_data_size = sizeof(ADPCMEncodeContext),                            \
    .init           = adpcm_encode_init,                                     \
    FF_CODEC_ENCODE_CB(adpcm_encode_frame),                                  \
    .close          = adpcm_encode_close,                                    \
    .caps_internal  = FF_CODEC_CAP_INIT_CLEANUP | FF_CODEC_CAP_INIT_THREADSAFE, \
};
#define ADPCM_ENCODER_2(enabled, codec_id, name, sample_fmts, capabilities, long_name) \
    ADPCM_ENCODER_ ## enabled(codec_id, name, sample_fmts, capabilities, long_name)
#define ADPCM_ENCODER_3(config, codec_id, name, sample_fmts, capabilities, long_name) \
    ADPCM_ENCODER_2(config, codec_id, name, sample_fmts, capabilities, long_name)
#define ADPCM_ENCODER(codec, name, sample_fmts, capabilities, long_name) \
    ADPCM_ENCODER_3(CONFIG_ ## codec ## _ENCODER, AV_CODEC_ID_ ## codec, \
                    name, sample_fmts, capabilities, long_name)

ADPCM_ENCODER(ADPCM_ARGO,    adpcm_argo,    sample_fmts_p, 0, "ADPCM Argonaut Games")
ADPCM_ENCODER(ADPCM_IMA_AMV, adpcm_ima_amv, sample_fmts,   0, "ADPCM IMA AMV")
ADPCM_ENCODER(ADPCM_IMA_APM, adpcm_ima_apm, sample_fmts,   AV_CODEC_CAP_SMALL_LAST_FRAME, "ADPCM IMA Ubisoft APM")
ADPCM_ENCODER(ADPCM_IMA_ALP, adpcm_ima_alp, sample_fmts,   AV_CODEC_CAP_SMALL_LAST_FRAME, "ADPCM IMA High Voltage Software ALP")
ADPCM_ENCODER(ADPCM_IMA_QT,  adpcm_ima_qt,  sample_fmts_p, 0, "ADPCM IMA QuickTime")
ADPCM_ENCODER(ADPCM_IMA_SSI, adpcm_ima_ssi, sample_fmts,   AV_CODEC_CAP_SMALL_LAST_FRAME, "ADPCM IMA Simon & Schuster Interactive")
ADPCM_ENCODER(ADPCM_IMA_WAV, adpcm_ima_wav, sample_fmts_p, 0, "ADPCM IMA WAV")
ADPCM_ENCODER(ADPCM_IMA_WS,  adpcm_ima_ws,  sample_fmts,   AV_CODEC_CAP_SMALL_LAST_FRAME, "ADPCM IMA Westwood")
ADPCM_ENCODER(ADPCM_MS,      adpcm_ms,      sample_fmts,   0, "ADPCM Microsoft")
ADPCM_ENCODER(ADPCM_SWF,     adpcm_swf,     sample_fmts,   0, "ADPCM Shockwave Flash")
ADPCM_ENCODER(ADPCM_YAMAHA,  adpcm_yamaha,  sample_fmts,   0, "ADPCM Yamaha")