FFmpeg  4.1.11
adpcm.c
1 /*
2  * Copyright (c) 2001-2003 The FFmpeg project
3  *
4  * first version by Francois Revol (revol@free.fr)
5  * fringe ADPCM codecs (e.g., DK3, DK4, Westwood)
6  * by Mike Melanson (melanson@pcisys.net)
7  * CD-ROM XA ADPCM codec by BERO
8  * EA ADPCM decoder by Robin Kay (komadori@myrealbox.com)
9  * EA ADPCM R1/R2/R3 decoder by Peter Ross (pross@xvid.org)
10  * EA IMA EACS decoder by Peter Ross (pross@xvid.org)
11  * EA IMA SEAD decoder by Peter Ross (pross@xvid.org)
12  * EA ADPCM XAS decoder by Peter Ross (pross@xvid.org)
13  * MAXIS EA ADPCM decoder by Robert Marston (rmarston@gmail.com)
14  * THP ADPCM decoder by Marco Gerards (mgerards@xs4all.nl)
15  *
16  * This file is part of FFmpeg.
17  *
18  * FFmpeg is free software; you can redistribute it and/or
19  * modify it under the terms of the GNU Lesser General Public
20  * License as published by the Free Software Foundation; either
21  * version 2.1 of the License, or (at your option) any later version.
22  *
23  * FFmpeg is distributed in the hope that it will be useful,
24  * but WITHOUT ANY WARRANTY; without even the implied warranty of
25  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
26  * Lesser General Public License for more details.
27  *
28  * You should have received a copy of the GNU Lesser General Public
29  * License along with FFmpeg; if not, write to the Free Software
30  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
31  */
32 #include "avcodec.h"
33 #include "get_bits.h"
34 #include "bytestream.h"
35 #include "adpcm.h"
36 #include "adpcm_data.h"
37 #include "internal.h"
38 
39 /**
40  * @file
41  * ADPCM decoders
42  * Features and limitations:
43  *
44  * Reference documents:
45  * http://wiki.multimedia.cx/index.php?title=Category:ADPCM_Audio_Codecs
46  * http://www.pcisys.net/~melanson/codecs/simpleaudio.html [dead]
47  * http://www.geocities.com/SiliconValley/8682/aud3.txt [dead]
48  * http://openquicktime.sourceforge.net/
49  * XAnim sources (xa_codec.c) http://xanim.polter.net/
50  * http://www.cs.ucla.edu/~leec/mediabench/applications.html [dead]
51  * SoX source code http://sox.sourceforge.net/
52  *
53  * CD-ROM XA:
54  * http://ku-www.ss.titech.ac.jp/~yatsushi/xaadpcm.html [dead]
55  * vagpack & depack http://homepages.compuserve.de/bITmASTER32/psx-index.html [dead]
56  * readstr http://www.geocities.co.jp/Playtown/2004/
57  */
58 
59 /* These are for CD-ROM XA ADPCM */
60 static const int xa_adpcm_table[5][2] = {
61  { 0, 0 },
62  { 60, 0 },
63  { 115, -52 },
64  { 98, -55 },
65  { 122, -60 }
66 };
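/* Each row holds a pair of prediction coefficients (f0, f1) in 1/64 units;
 * xa_decode() below forms the prediction as (s_1*f0 + s_2*f1 + 32) >> 6 before
 * adding the shifted 4-bit residual, and the PSX decoder reuses the same table
 * with a plain /64. */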
67 
68 static const int ea_adpcm_table[] = {
69  0, 240, 460, 392,
70  0, 0, -208, -220,
71  0, 1, 3, 4,
72  7, 8, 10, 11,
73  0, -1, -3, -4
74 };
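/* Indexed with the 4-bit predictor selector n taken from the bitstream:
 * coeff1 = ea_adpcm_table[n] and coeff2 = ea_adpcm_table[n + 4]; the EA
 * decoders below scale the prediction back down with a >> 8 (most of them
 * with +0x80 rounding). */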
75 
76 // padded to zero where table size is less than 16
77 static const int swf_index_tables[4][16] = {
78  /*2*/ { -1, 2 },
79  /*3*/ { -1, -1, 2, 4 },
80  /*4*/ { -1, -1, -1, -1, 2, 4, 6, 8 },
81  /*5*/ { -1, -1, -1, -1, -1, -1, -1, -1, 1, 2, 4, 6, 8, 10, 13, 16 }
82 };
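/* The SWF header's 2-bit code selects one of these rows (and a code size of
 * 2..5 bits); adpcm_swf_decode() masks off the sign bit before indexing, so
 * only the first half of each row is ever used for the step-index update. */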
83 
84 /* end of tables */
85 
86 typedef struct ADPCMDecodeContext {
87  ADPCMChannelStatus status[14];
88  int vqa_version; /**< VQA version. Used for ADPCM_IMA_WS */
89  int has_status;
90 } ADPCMDecodeContext;
91 
92 static av_cold int adpcm_decode_init(AVCodecContext * avctx)
93 {
94  ADPCMDecodeContext *c = avctx->priv_data;
95  unsigned int min_channels = 1;
96  unsigned int max_channels = 2;
97 
98  switch(avctx->codec->id) {
99  case AV_CODEC_ID_ADPCM_DTK:
100  case AV_CODEC_ID_ADPCM_EA:
101  min_channels = 2;
102  break;
103  case AV_CODEC_ID_ADPCM_AFC:
104  case AV_CODEC_ID_ADPCM_EA_R1:
105  case AV_CODEC_ID_ADPCM_EA_R2:
106  case AV_CODEC_ID_ADPCM_EA_R3:
107  case AV_CODEC_ID_ADPCM_EA_XAS:
108  max_channels = 6;
109  break;
110  case AV_CODEC_ID_ADPCM_MTAF:
111  min_channels = 2;
112  max_channels = 8;
113  if (avctx->channels & 1) {
114  avpriv_request_sample(avctx, "channel count %d\n", avctx->channels);
115  return AVERROR_PATCHWELCOME;
116  }
117  break;
118  case AV_CODEC_ID_ADPCM_PSX:
119  max_channels = 8;
120  break;
121  case AV_CODEC_ID_ADPCM_IMA_DAT4:
122  case AV_CODEC_ID_ADPCM_THP:
123  case AV_CODEC_ID_ADPCM_THP_LE:
124  max_channels = 14;
125  break;
126  }
127  if (avctx->channels < min_channels || avctx->channels > max_channels) {
128  av_log(avctx, AV_LOG_ERROR, "Invalid number of channels\n");
129  return AVERROR(EINVAL);
130  }
131 
132  switch(avctx->codec->id) {
133  case AV_CODEC_ID_ADPCM_CT:
134  c->status[0].step = c->status[1].step = 511;
135  break;
136  case AV_CODEC_ID_ADPCM_IMA_WAV:
137  if (avctx->bits_per_coded_sample < 2 || avctx->bits_per_coded_sample > 5)
138  return AVERROR_INVALIDDATA;
139  break;
140  case AV_CODEC_ID_ADPCM_IMA_APC:
141  if (avctx->extradata && avctx->extradata_size >= 8) {
142  c->status[0].predictor = av_clip_intp2(AV_RL32(avctx->extradata ), 18);
143  c->status[1].predictor = av_clip_intp2(AV_RL32(avctx->extradata + 4), 18);
144  }
145  break;
146  case AV_CODEC_ID_ADPCM_IMA_WS:
147  if (avctx->extradata && avctx->extradata_size >= 2)
148  c->vqa_version = AV_RL16(avctx->extradata);
149  break;
150  default:
151  break;
152  }
153 
154  switch(avctx->codec->id) {
155  case AV_CODEC_ID_ADPCM_AICA:
156  case AV_CODEC_ID_ADPCM_IMA_DAT4:
157  case AV_CODEC_ID_ADPCM_IMA_QT:
158  case AV_CODEC_ID_ADPCM_IMA_WAV:
159  case AV_CODEC_ID_ADPCM_4XM:
160  case AV_CODEC_ID_ADPCM_XA:
161  case AV_CODEC_ID_ADPCM_EA_R1:
162  case AV_CODEC_ID_ADPCM_EA_R2:
163  case AV_CODEC_ID_ADPCM_EA_R3:
164  case AV_CODEC_ID_ADPCM_EA_XAS:
165  case AV_CODEC_ID_ADPCM_THP:
166  case AV_CODEC_ID_ADPCM_THP_LE:
167  case AV_CODEC_ID_ADPCM_AFC:
168  case AV_CODEC_ID_ADPCM_DTK:
169  case AV_CODEC_ID_ADPCM_PSX:
170  case AV_CODEC_ID_ADPCM_MTAF:
171  avctx->sample_fmt = AV_SAMPLE_FMT_S16P;
172  break;
173  case AV_CODEC_ID_ADPCM_IMA_WS:
174  avctx->sample_fmt = c->vqa_version == 3 ? AV_SAMPLE_FMT_S16P :
175  AV_SAMPLE_FMT_S16;
176  break;
177  default:
178  avctx->sample_fmt = AV_SAMPLE_FMT_S16;
179  }
180 
181  return 0;
182 }
183 
184 static inline int16_t adpcm_ima_expand_nibble(ADPCMChannelStatus *c, int8_t nibble, int shift)
185 {
186  int step_index;
187  int predictor;
188  int sign, delta, diff, step;
189 
190  step = ff_adpcm_step_table[c->step_index];
191  step_index = c->step_index + ff_adpcm_index_table[(unsigned)nibble];
192  step_index = av_clip(step_index, 0, 88);
193 
194  sign = nibble & 8;
195  delta = nibble & 7;
196  /* perform direct multiplication instead of series of jumps proposed by
197  * the reference ADPCM implementation since modern CPUs can do the mults
198  * quickly enough */
199  diff = ((2 * delta + 1) * step) >> shift;
200  predictor = c->predictor;
201  if (sign) predictor -= diff;
202  else predictor += diff;
203 
204  c->predictor = av_clip_int16(predictor);
205  c->step_index = step_index;
206 
207  return (int16_t)c->predictor;
208 }
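/* Worked example with the standard IMA tables: for step_index 20 (step 50),
 * nibble 0x5 (sign bit clear, delta 5) and shift 3, diff = ((2*5 + 1) * 50) >> 3
 * = 68, so the predictor moves up by 68 and the step index advances by
 * ff_adpcm_index_table[5] = 4, to 24. */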
209 
210 static inline int16_t adpcm_ima_wav_expand_nibble(ADPCMChannelStatus *c, GetBitContext *gb, int bps)
211 {
212  int nibble, step_index, predictor, sign, delta, diff, step, shift;
213 
214  shift = bps - 1;
215  nibble = get_bits_le(gb, bps),
216  step = ff_adpcm_step_table[c->step_index];
217  step_index = c->step_index + ff_adpcm_index_tables[bps - 2][nibble];
218  step_index = av_clip(step_index, 0, 88);
219 
220  sign = nibble & (1 << shift);
221  delta = av_mod_uintp2(nibble, shift);
222  diff = ((2 * delta + 1) * step) >> shift;
223  predictor = c->predictor;
224  if (sign) predictor -= diff;
225  else predictor += diff;
226 
227  c->predictor = av_clip_int16(predictor);
228  c->step_index = step_index;
229 
230  return (int16_t)c->predictor;
231 }
232 
233 static inline int adpcm_ima_qt_expand_nibble(ADPCMChannelStatus *c, int nibble, int shift)
234 {
235  int step_index;
236  int predictor;
237  int diff, step;
238 
239  step = ff_adpcm_step_table[c->step_index];
240  step_index = c->step_index + ff_adpcm_index_table[nibble];
241  step_index = av_clip(step_index, 0, 88);
242 
243  diff = step >> 3;
244  if (nibble & 4) diff += step;
245  if (nibble & 2) diff += step >> 1;
246  if (nibble & 1) diff += step >> 2;
247 
248  if (nibble & 8)
249  predictor = c->predictor - diff;
250  else
251  predictor = c->predictor + diff;
252 
253  c->predictor = av_clip_int16(predictor);
254  c->step_index = step_index;
255 
256  return c->predictor;
257 }
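/* The QuickTime/IMA variant follows the reference implementation: the
 * shift-and-add chain above is the multiply-free form of roughly
 * ((2*delta + 1) * step) >> 3, with the low nibble bits selecting the
 * step, step/2 and step/4 contributions. */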
258 
259 static inline int16_t adpcm_ms_expand_nibble(ADPCMChannelStatus *c, int nibble)
260 {
261  int predictor;
262 
263  predictor = (((c->sample1) * (c->coeff1)) + ((c->sample2) * (c->coeff2))) / 64;
264  predictor += ((nibble & 0x08)?(nibble - 0x10):(nibble)) * c->idelta;
265 
266  c->sample2 = c->sample1;
267  c->sample1 = av_clip_int16(predictor);
268  c->idelta = (ff_adpcm_AdaptationTable[(int)nibble] * c->idelta) >> 8;
269  if (c->idelta < 16) c->idelta = 16;
270  if (c->idelta > INT_MAX/768) {
271  av_log(NULL, AV_LOG_WARNING, "idelta overflow\n");
272  c->idelta = INT_MAX/768;
273  }
274 
275  return c->sample1;
276 }
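/* MS ADPCM: the prediction is a two-tap filter over the previous two outputs
 * (coeff1/coeff2 are chosen per block from ff_adpcm_AdaptCoeff1/2 and applied
 * in 1/64 units), the signed 4-bit nibble is scaled by the adaptive quantizer
 * idelta, and idelta itself is rescaled through ff_adpcm_AdaptationTable after
 * every sample, never dropping below 16. */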
277 
278 static inline int16_t adpcm_ima_oki_expand_nibble(ADPCMChannelStatus *c, int nibble)
279 {
280  int step_index, predictor, sign, delta, diff, step;
281 
282  step = ff_adpcm_oki_step_table[c->step_index];
283  step_index = c->step_index + ff_adpcm_index_table[(unsigned)nibble];
284  step_index = av_clip(step_index, 0, 48);
285 
286  sign = nibble & 8;
287  delta = nibble & 7;
288  diff = ((2 * delta + 1) * step) >> 3;
289  predictor = c->predictor;
290  if (sign) predictor -= diff;
291  else predictor += diff;
292 
293  c->predictor = av_clip_intp2(predictor, 11);
294  c->step_index = step_index;
295 
296  return c->predictor * 16;
297 }
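/* OKI/Dialogic (VOX) variant: the predictor is kept at 12-bit precision
 * (av_clip_intp2(..., 11) clamps it to -2048..2047), the step index runs over
 * a 49-entry table (0..48), and the result is scaled by 16 to cover the
 * 16-bit output range. */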
298 
299 static inline int16_t adpcm_ct_expand_nibble(ADPCMChannelStatus *c, int8_t nibble)
300 {
301  int sign, delta, diff;
302  int new_step;
303 
304  sign = nibble & 8;
305  delta = nibble & 7;
306  /* perform direct multiplication instead of series of jumps proposed by
307  * the reference ADPCM implementation since modern CPUs can do the mults
308  * quickly enough */
309  diff = ((2 * delta + 1) * c->step) >> 3;
310  /* predictor update is not so trivial: the predictor is multiplied by 254/256 before updating */
311  c->predictor = ((c->predictor * 254) >> 8) + (sign ? -diff : diff);
312  c->predictor = av_clip_int16(c->predictor);
313  /* calculate new step and clamp it to range 511..32767 */
314  new_step = (ff_adpcm_AdaptationTable[nibble & 7] * c->step) >> 8;
315  c->step = av_clip(new_step, 511, 32767);
316 
317  return (int16_t)c->predictor;
318 }
319 
320 static inline int16_t adpcm_sbpro_expand_nibble(ADPCMChannelStatus *c, int8_t nibble, int size, int shift)
321 {
322  int sign, delta, diff;
323 
324  sign = nibble & (1<<(size-1));
325  delta = nibble & ((1<<(size-1))-1);
326  diff = delta << (7 + c->step + shift);
327 
328  /* clamp result */
329  c->predictor = av_clip(c->predictor + (sign ? -diff : diff), -16384,16256);
330 
331  /* calculate new step */
332  if (delta >= (2*size - 3) && c->step < 3)
333  c->step++;
334  else if (delta == 0 && c->step > 0)
335  c->step--;
336 
337  return (int16_t) c->predictor;
338 }
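/* Sound Blaster Pro: 'size' is the code width (2, 3 or 4 bits), the top bit of
 * the code is the sign, and c->step acts as a small exponent (0..3) that is
 * bumped on large codes and lowered on zero codes; the difference is
 * delta << (7 + step + shift), with the predictor clamped to -16384..16256. */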
339 
340 static inline int16_t adpcm_yamaha_expand_nibble(ADPCMChannelStatus *c, uint8_t nibble)
341 {
342  if(!c->step) {
343  c->predictor = 0;
344  c->step = 127;
345  }
346 
347  c->predictor += (c->step * ff_adpcm_yamaha_difflookup[nibble]) / 8;
348  c->predictor = av_clip_int16(c->predictor);
349  c->step = (c->step * ff_adpcm_yamaha_indexscale[nibble]) >> 8;
350  c->step = av_clip(c->step, 127, 24576);
351  return c->predictor;
352 }
353 
354 static inline int16_t adpcm_mtaf_expand_nibble(ADPCMChannelStatus *c, uint8_t nibble)
355 {
356  c->predictor += ff_adpcm_mtaf_stepsize[c->step][nibble];
357  c->predictor = av_clip_int16(c->predictor);
358  c->step += ff_adpcm_index_table[nibble];
359  c->step = av_clip_uintp2(c->step, 5);
360  return c->predictor;
361 }
362 
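/* CD-ROM XA: each 128-byte sound group carries 8 coding units of 28 samples.
 * The loop below reads the per-unit shift/filter bytes at offsets 4..11 and
 * the packed sample data at offsets 16..127; within every data byte the low
 * nibble belongs to the even unit (out0) and the high nibble to the odd unit
 * (out1), which map to left/right in stereo and to consecutive runs of 28
 * samples in mono. */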
363 static int xa_decode(AVCodecContext *avctx, int16_t *out0, int16_t *out1,
364  const uint8_t *in, ADPCMChannelStatus *left,
365  ADPCMChannelStatus *right, int channels, int sample_offset)
366 {
367  int i, j;
368  int shift,filter,f0,f1;
369  int s_1,s_2;
370  int d,s,t;
371 
372  out0 += sample_offset;
373  if (channels == 1)
374  out1 = out0 + 28;
375  else
376  out1 += sample_offset;
377 
378  for(i=0;i<4;i++) {
379  shift = 12 - (in[4+i*2] & 15);
380  filter = in[4+i*2] >> 4;
381  if (filter >= FF_ARRAY_ELEMS(xa_adpcm_table)) {
382  avpriv_request_sample(avctx, "unknown XA-ADPCM filter %d", filter);
383  filter=0;
384  }
385  if (shift < 0) {
386  avpriv_request_sample(avctx, "unknown XA-ADPCM shift %d", shift);
387  shift = 0;
388  }
389  f0 = xa_adpcm_table[filter][0];
390  f1 = xa_adpcm_table[filter][1];
391 
392  s_1 = left->sample1;
393  s_2 = left->sample2;
394 
395  for(j=0;j<28;j++) {
396  d = in[16+i+j*4];
397 
398  t = sign_extend(d, 4);
399  s = t*(1<<shift) + ((s_1*f0 + s_2*f1+32)>>6);
400  s_2 = s_1;
401  s_1 = av_clip_int16(s);
402  out0[j] = s_1;
403  }
404 
405  if (channels == 2) {
406  left->sample1 = s_1;
407  left->sample2 = s_2;
408  s_1 = right->sample1;
409  s_2 = right->sample2;
410  }
411 
412  shift = 12 - (in[5+i*2] & 15);
413  filter = in[5+i*2] >> 4;
414  if (filter >= FF_ARRAY_ELEMS(xa_adpcm_table) || shift < 0) {
415  avpriv_request_sample(avctx, "unknown XA-ADPCM filter %d", filter);
416  filter=0;
417  }
418  if (shift < 0) {
419  avpriv_request_sample(avctx, "unknown XA-ADPCM shift %d", shift);
420  shift = 0;
421  }
422 
423  f0 = xa_adpcm_table[filter][0];
424  f1 = xa_adpcm_table[filter][1];
425 
426  for(j=0;j<28;j++) {
427  d = in[16+i+j*4];
428 
429  t = sign_extend(d >> 4, 4);
430  s = t*(1<<shift) + ((s_1*f0 + s_2*f1+32)>>6);
431  s_2 = s_1;
432  s_1 = av_clip_int16(s);
433  out1[j] = s_1;
434  }
435 
436  if (channels == 2) {
437  right->sample1 = s_1;
438  right->sample2 = s_2;
439  } else {
440  left->sample1 = s_1;
441  left->sample2 = s_2;
442  }
443 
444  out0 += 28 * (3 - channels);
445  out1 += 28 * (3 - channels);
446  }
447 
448  return 0;
449 }
450 
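/* SWF ADPCM: the packet starts with a 2-bit code-size selector; each block of
 * up to 4096 samples then begins with a raw 16-bit sample and a 6-bit step
 * index per channel, followed by (2..5)-bit IMA-style codes. */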
451 static void adpcm_swf_decode(AVCodecContext *avctx, const uint8_t *buf, int buf_size, int16_t *samples)
452 {
453  ADPCMDecodeContext *c = avctx->priv_data;
454  GetBitContext gb;
455  const int *table;
456  int k0, signmask, nb_bits, count;
457  int size = buf_size*8;
458  int i;
459 
460  init_get_bits(&gb, buf, size);
461 
462  //read bits & initial values
463  nb_bits = get_bits(&gb, 2)+2;
464  table = swf_index_tables[nb_bits-2];
465  k0 = 1 << (nb_bits-2);
466  signmask = 1 << (nb_bits-1);
467 
468  while (get_bits_count(&gb) <= size - 22*avctx->channels) {
469  for (i = 0; i < avctx->channels; i++) {
470  *samples++ = c->status[i].predictor = get_sbits(&gb, 16);
471  c->status[i].step_index = get_bits(&gb, 6);
472  }
473 
474  for (count = 0; get_bits_count(&gb) <= size - nb_bits*avctx->channels && count < 4095; count++) {
475  int i;
476 
477  for (i = 0; i < avctx->channels; i++) {
478  // similar to IMA adpcm
479  int delta = get_bits(&gb, nb_bits);
480  int step = ff_adpcm_step_table[c->status[i].step_index];
481  int vpdiff = 0; // vpdiff = (delta+0.5)*step/4
482  int k = k0;
483 
484  do {
485  if (delta & k)
486  vpdiff += step;
487  step >>= 1;
488  k >>= 1;
489  } while(k);
490  vpdiff += step;
491 
492  if (delta & signmask)
493  c->status[i].predictor -= vpdiff;
494  else
495  c->status[i].predictor += vpdiff;
496 
497  c->status[i].step_index += table[delta & (~signmask)];
498 
499  c->status[i].step_index = av_clip(c->status[i].step_index, 0, 88);
500  c->status[i].predictor = av_clip_int16(c->status[i].predictor);
501 
502  *samples++ = c->status[i].predictor;
503  }
504  }
505  }
506 }
507 
508 /**
509  * Get the number of samples that will be decoded from the packet.
510  * In one case, this is actually the maximum number of samples possible to
511  * decode with the given buf_size.
512  *
513  * @param[out] coded_samples set to the number of samples as coded in the
514  * packet, or 0 if the codec does not encode the
515  * number of samples in each frame.
516  * @param[out] approx_nb_samples set to non-zero if the number of samples
517  * returned is an approximation.
518  */
519 static int get_nb_samples(AVCodecContext *avctx, GetByteContext *gb,
520  int buf_size, int *coded_samples, int *approx_nb_samples)
521 {
522  ADPCMDecodeContext *s = avctx->priv_data;
523  int nb_samples = 0;
524  int ch = avctx->channels;
525  int has_coded_samples = 0;
526  int header_size;
527 
528  *coded_samples = 0;
529  *approx_nb_samples = 0;
530 
531  if(ch <= 0)
532  return 0;
533 
534  switch (avctx->codec->id) {
535  /* constant, only check buf_size */
537  if (buf_size < 76 * ch)
538  return 0;
539  nb_samples = 128;
540  break;
542  if (buf_size < 34 * ch)
543  return 0;
544  nb_samples = 64;
545  break;
546  /* simple 4-bit adpcm */
554  nb_samples = buf_size * 2 / ch;
555  break;
556  }
557  if (nb_samples)
558  return nb_samples;
559 
560  /* simple 4-bit adpcm, with header */
561  header_size = 0;
562  switch (avctx->codec->id) {
565  case AV_CODEC_ID_ADPCM_IMA_ISS: header_size = 4 * ch; break;
566  case AV_CODEC_ID_ADPCM_IMA_AMV: header_size = 8; break;
567  case AV_CODEC_ID_ADPCM_IMA_SMJPEG: header_size = 4 * ch; break;
568  }
569  if (header_size > 0)
570  return (buf_size - header_size) * 2 / ch;
571 
572  /* more complex formats */
573  switch (avctx->codec->id) {
575  has_coded_samples = 1;
576  *coded_samples = bytestream2_get_le32(gb);
577  *coded_samples -= *coded_samples % 28;
578  nb_samples = (buf_size - 12) / 30 * 28;
579  break;
581  has_coded_samples = 1;
582  *coded_samples = bytestream2_get_le32(gb);
583  nb_samples = (buf_size - (4 + 8 * ch)) * 2 / ch;
584  break;
586  nb_samples = (buf_size - ch) / ch * 2;
587  break;
591  /* maximum number of samples */
592  /* has internal offsets and a per-frame switch to signal raw 16-bit */
593  has_coded_samples = 1;
594  switch (avctx->codec->id) {
596  header_size = 4 + 9 * ch;
597  *coded_samples = bytestream2_get_le32(gb);
598  break;
600  header_size = 4 + 5 * ch;
601  *coded_samples = bytestream2_get_le32(gb);
602  break;
604  header_size = 4 + 5 * ch;
605  *coded_samples = bytestream2_get_be32(gb);
606  break;
607  }
608  *coded_samples -= *coded_samples % 28;
609  nb_samples = (buf_size - header_size) * 2 / ch;
610  nb_samples -= nb_samples % 28;
611  *approx_nb_samples = 1;
612  break;
614  if (avctx->block_align > 0)
615  buf_size = FFMIN(buf_size, avctx->block_align);
616  nb_samples = ((buf_size - 16) * 2 / 3 * 4) / ch;
617  break;
619  if (avctx->block_align > 0)
620  buf_size = FFMIN(buf_size, avctx->block_align);
621  if (buf_size < 4 * ch)
622  return AVERROR_INVALIDDATA;
623  nb_samples = 1 + (buf_size - 4 * ch) * 2 / ch;
624  break;
626  if (avctx->block_align > 0)
627  buf_size = FFMIN(buf_size, avctx->block_align);
628  nb_samples = (buf_size - 4 * ch) * 2 / ch;
629  break;
631  {
632  int bsize = ff_adpcm_ima_block_sizes[avctx->bits_per_coded_sample - 2];
633  int bsamples = ff_adpcm_ima_block_samples[avctx->bits_per_coded_sample - 2];
634  if (avctx->block_align > 0)
635  buf_size = FFMIN(buf_size, avctx->block_align);
636  if (buf_size < 4 * ch)
637  return AVERROR_INVALIDDATA;
638  nb_samples = 1 + (buf_size - 4 * ch) / (bsize * ch) * bsamples;
639  break;
640  }
642  if (avctx->block_align > 0)
643  buf_size = FFMIN(buf_size, avctx->block_align);
644  nb_samples = (buf_size - 6 * ch) * 2 / ch;
645  break;
647  if (avctx->block_align > 0)
648  buf_size = FFMIN(buf_size, avctx->block_align);
649  nb_samples = (buf_size - 16 * (ch / 2)) * 2 / ch;
650  break;
654  {
655  int samples_per_byte;
656  switch (avctx->codec->id) {
657  case AV_CODEC_ID_ADPCM_SBPRO_2: samples_per_byte = 4; break;
658  case AV_CODEC_ID_ADPCM_SBPRO_3: samples_per_byte = 3; break;
659  case AV_CODEC_ID_ADPCM_SBPRO_4: samples_per_byte = 2; break;
660  }
661  if (!s->status[0].step_index) {
662  if (buf_size < ch)
663  return AVERROR_INVALIDDATA;
664  nb_samples++;
665  buf_size -= ch;
666  }
667  nb_samples += buf_size * samples_per_byte / ch;
668  break;
669  }
671  {
672  int buf_bits = buf_size * 8 - 2;
673  int nbits = (bytestream2_get_byte(gb) >> 6) + 2;
674  int block_hdr_size = 22 * ch;
675  int block_size = block_hdr_size + nbits * ch * 4095;
676  int nblocks = buf_bits / block_size;
677  int bits_left = buf_bits - nblocks * block_size;
678  nb_samples = nblocks * 4096;
679  if (bits_left >= block_hdr_size)
680  nb_samples += 1 + (bits_left - block_hdr_size) / (nbits * ch);
681  break;
682  }
685  if (avctx->extradata) {
686  nb_samples = buf_size * 14 / (8 * ch);
687  break;
688  }
689  has_coded_samples = 1;
690  bytestream2_skip(gb, 4); // channel size
691  *coded_samples = (avctx->codec->id == AV_CODEC_ID_ADPCM_THP_LE) ?
692  bytestream2_get_le32(gb) :
693  bytestream2_get_be32(gb);
694  buf_size -= 8 + 36 * ch;
695  buf_size /= ch;
696  nb_samples = buf_size / 8 * 14;
697  if (buf_size % 8 > 1)
698  nb_samples += (buf_size % 8 - 1) * 2;
699  *approx_nb_samples = 1;
700  break;
702  nb_samples = buf_size / (9 * ch) * 16;
703  break;
705  nb_samples = (buf_size / 128) * 224 / ch;
706  break;
709  nb_samples = buf_size / (16 * ch) * 28;
710  break;
711  }
712 
713  /* validate coded sample count */
714  if (has_coded_samples && (*coded_samples <= 0 || *coded_samples > nb_samples))
715  return AVERROR_INVALIDDATA;
716 
717  return nb_samples;
718 }
719 
720 static int adpcm_decode_frame(AVCodecContext *avctx, void *data,
721  int *got_frame_ptr, AVPacket *avpkt)
722 {
723  AVFrame *frame = data;
724  const uint8_t *buf = avpkt->data;
725  int buf_size = avpkt->size;
726  ADPCMDecodeContext *c = avctx->priv_data;
727  ADPCMChannelStatus *cs;
728  int n, m, channel, i;
729  int16_t *samples;
730  int16_t **samples_p;
731  int st; /* stereo */
732  int count1, count2;
733  int nb_samples, coded_samples, approx_nb_samples, ret;
734  GetByteContext gb;
735 
736  bytestream2_init(&gb, buf, buf_size);
737  nb_samples = get_nb_samples(avctx, &gb, buf_size, &coded_samples, &approx_nb_samples);
738  if (nb_samples <= 0) {
739  av_log(avctx, AV_LOG_ERROR, "invalid number of samples in packet\n");
740  return AVERROR_INVALIDDATA;
741  }
742 
743  /* get output buffer */
744  frame->nb_samples = nb_samples;
745  if ((ret = ff_get_buffer(avctx, frame, 0)) < 0)
746  return ret;
747  samples = (int16_t *)frame->data[0];
748  samples_p = (int16_t **)frame->extended_data;
749 
750  /* use coded_samples when applicable */
751  /* it is always <= nb_samples, so the output buffer will be large enough */
752  if (coded_samples) {
753  if (!approx_nb_samples && coded_samples != nb_samples)
754  av_log(avctx, AV_LOG_WARNING, "mismatch in coded sample count\n");
755  frame->nb_samples = nb_samples = coded_samples;
756  }
757 
758  st = avctx->channels == 2 ? 1 : 0;
759 
760  switch(avctx->codec->id) {
761  case AV_CODEC_ID_ADPCM_IMA_QT:
762  /* In QuickTime, IMA is encoded by chunks of 34 bytes (=64 samples).
763  Channel data is interleaved per-chunk. */
764  for (channel = 0; channel < avctx->channels; channel++) {
765  int predictor;
766  int step_index;
767  cs = &(c->status[channel]);
768  /* (pppppp) (piiiiiii) */
769 
770  /* Bits 15-7 are the _top_ 9 bits of the 16-bit initial predictor value */
771  predictor = sign_extend(bytestream2_get_be16u(&gb), 16);
772  step_index = predictor & 0x7F;
773  predictor &= ~0x7F;
774 
775  if (cs->step_index == step_index) {
776  int diff = predictor - cs->predictor;
777  if (diff < 0)
778  diff = - diff;
779  if (diff > 0x7f)
780  goto update;
781  } else {
782  update:
783  cs->step_index = step_index;
784  cs->predictor = predictor;
785  }
786 
787  if (cs->step_index > 88u){
788  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
789  channel, cs->step_index);
790  return AVERROR_INVALIDDATA;
791  }
792 
793  samples = samples_p[channel];
794 
795  for (m = 0; m < 64; m += 2) {
796  int byte = bytestream2_get_byteu(&gb);
797  samples[m ] = adpcm_ima_qt_expand_nibble(cs, byte & 0x0F, 3);
798  samples[m + 1] = adpcm_ima_qt_expand_nibble(cs, byte >> 4 , 3);
799  }
800  }
801  break;
803  for(i=0; i<avctx->channels; i++){
804  cs = &(c->status[i]);
805  cs->predictor = samples_p[i][0] = sign_extend(bytestream2_get_le16u(&gb), 16);
806 
807  cs->step_index = sign_extend(bytestream2_get_le16u(&gb), 16);
808  if (cs->step_index > 88u){
809  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
810  i, cs->step_index);
811  return AVERROR_INVALIDDATA;
812  }
813  }
814 
815  if (avctx->bits_per_coded_sample != 4) {
816  int samples_per_block = ff_adpcm_ima_block_samples[avctx->bits_per_coded_sample - 2];
817  int block_size = ff_adpcm_ima_block_sizes[avctx->bits_per_coded_sample - 2];
818  uint8_t temp[20 + AV_INPUT_BUFFER_PADDING_SIZE] = { 0 };
819  GetBitContext g;
820 
821  for (n = 0; n < (nb_samples - 1) / samples_per_block; n++) {
822  for (i = 0; i < avctx->channels; i++) {
823  int j;
824 
825  cs = &c->status[i];
826  samples = &samples_p[i][1 + n * samples_per_block];
827  for (j = 0; j < block_size; j++) {
828  temp[j] = buf[4 * avctx->channels + block_size * n * avctx->channels +
829  (j % 4) + (j / 4) * (avctx->channels * 4) + i * 4];
830  }
831  ret = init_get_bits8(&g, (const uint8_t *)&temp, block_size);
832  if (ret < 0)
833  return ret;
834  for (m = 0; m < samples_per_block; m++) {
835  samples[m] = adpcm_ima_wav_expand_nibble(cs, &g,
836  avctx->bits_per_coded_sample);
837  }
838  }
839  }
840  bytestream2_skip(&gb, avctx->block_align - avctx->channels * 4);
841  } else {
842  for (n = 0; n < (nb_samples - 1) / 8; n++) {
843  for (i = 0; i < avctx->channels; i++) {
844  cs = &c->status[i];
845  samples = &samples_p[i][1 + n * 8];
846  for (m = 0; m < 8; m += 2) {
847  int v = bytestream2_get_byteu(&gb);
848  samples[m ] = adpcm_ima_expand_nibble(cs, v & 0x0F, 3);
849  samples[m + 1] = adpcm_ima_expand_nibble(cs, v >> 4 , 3);
850  }
851  }
852  }
853  }
854  break;
856  for (i = 0; i < avctx->channels; i++)
857  c->status[i].predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
858 
859  for (i = 0; i < avctx->channels; i++) {
860  c->status[i].step_index = sign_extend(bytestream2_get_le16u(&gb), 16);
861  if (c->status[i].step_index > 88u) {
862  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
863  i, c->status[i].step_index);
864  return AVERROR_INVALIDDATA;
865  }
866  }
867 
868  for (i = 0; i < avctx->channels; i++) {
869  samples = (int16_t *)frame->data[i];
870  cs = &c->status[i];
871  for (n = nb_samples >> 1; n > 0; n--) {
872  int v = bytestream2_get_byteu(&gb);
873  *samples++ = adpcm_ima_expand_nibble(cs, v & 0x0F, 4);
874  *samples++ = adpcm_ima_expand_nibble(cs, v >> 4 , 4);
875  }
876  }
877  break;
879  {
880  int block_predictor;
881 
882  block_predictor = bytestream2_get_byteu(&gb);
883  if (block_predictor > 6) {
884  av_log(avctx, AV_LOG_ERROR, "ERROR: block_predictor[0] = %d\n",
885  block_predictor);
886  return AVERROR_INVALIDDATA;
887  }
888  c->status[0].coeff1 = ff_adpcm_AdaptCoeff1[block_predictor];
889  c->status[0].coeff2 = ff_adpcm_AdaptCoeff2[block_predictor];
890  if (st) {
891  block_predictor = bytestream2_get_byteu(&gb);
892  if (block_predictor > 6) {
893  av_log(avctx, AV_LOG_ERROR, "ERROR: block_predictor[1] = %d\n",
894  block_predictor);
895  return AVERROR_INVALIDDATA;
896  }
897  c->status[1].coeff1 = ff_adpcm_AdaptCoeff1[block_predictor];
898  c->status[1].coeff2 = ff_adpcm_AdaptCoeff2[block_predictor];
899  }
900  c->status[0].idelta = sign_extend(bytestream2_get_le16u(&gb), 16);
901  if (st){
902  c->status[1].idelta = sign_extend(bytestream2_get_le16u(&gb), 16);
903  }
904 
905  c->status[0].sample1 = sign_extend(bytestream2_get_le16u(&gb), 16);
906  if (st) c->status[1].sample1 = sign_extend(bytestream2_get_le16u(&gb), 16);
907  c->status[0].sample2 = sign_extend(bytestream2_get_le16u(&gb), 16);
908  if (st) c->status[1].sample2 = sign_extend(bytestream2_get_le16u(&gb), 16);
909 
910  *samples++ = c->status[0].sample2;
911  if (st) *samples++ = c->status[1].sample2;
912  *samples++ = c->status[0].sample1;
913  if (st) *samples++ = c->status[1].sample1;
914  for(n = (nb_samples - 2) >> (1 - st); n > 0; n--) {
915  int byte = bytestream2_get_byteu(&gb);
916  *samples++ = adpcm_ms_expand_nibble(&c->status[0 ], byte >> 4 );
917  *samples++ = adpcm_ms_expand_nibble(&c->status[st], byte & 0x0F);
918  }
919  break;
920  }
922  for (channel = 0; channel < avctx->channels; channel+=2) {
923  bytestream2_skipu(&gb, 4);
924  c->status[channel ].step = bytestream2_get_le16u(&gb) & 0x1f;
925  c->status[channel + 1].step = bytestream2_get_le16u(&gb) & 0x1f;
926  c->status[channel ].predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
927  bytestream2_skipu(&gb, 2);
928  c->status[channel + 1].predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
929  bytestream2_skipu(&gb, 2);
930  for (n = 0; n < nb_samples; n+=2) {
931  int v = bytestream2_get_byteu(&gb);
932  samples_p[channel][n ] = adpcm_mtaf_expand_nibble(&c->status[channel], v & 0x0F);
933  samples_p[channel][n + 1] = adpcm_mtaf_expand_nibble(&c->status[channel], v >> 4 );
934  }
935  for (n = 0; n < nb_samples; n+=2) {
936  int v = bytestream2_get_byteu(&gb);
937  samples_p[channel + 1][n ] = adpcm_mtaf_expand_nibble(&c->status[channel + 1], v & 0x0F);
938  samples_p[channel + 1][n + 1] = adpcm_mtaf_expand_nibble(&c->status[channel + 1], v >> 4 );
939  }
940  }
941  break;
943  for (channel = 0; channel < avctx->channels; channel++) {
944  cs = &c->status[channel];
945  cs->predictor = *samples++ = sign_extend(bytestream2_get_le16u(&gb), 16);
946  cs->step_index = sign_extend(bytestream2_get_le16u(&gb), 16);
947  if (cs->step_index > 88u){
948  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
949  channel, cs->step_index);
950  return AVERROR_INVALIDDATA;
951  }
952  }
953  for (n = (nb_samples - 1) >> (1 - st); n > 0; n--) {
954  int v = bytestream2_get_byteu(&gb);
955  *samples++ = adpcm_ima_expand_nibble(&c->status[0 ], v >> 4 , 3);
956  *samples++ = adpcm_ima_expand_nibble(&c->status[st], v & 0x0F, 3);
957  }
958  break;
960  {
961  int last_byte = 0;
962  int nibble;
963  int decode_top_nibble_next = 0;
964  int diff_channel;
965  const int16_t *samples_end = samples + avctx->channels * nb_samples;
966 
967  bytestream2_skipu(&gb, 10);
968  c->status[0].predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
969  c->status[1].predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
970  c->status[0].step_index = bytestream2_get_byteu(&gb);
971  c->status[1].step_index = bytestream2_get_byteu(&gb);
972  if (c->status[0].step_index > 88u || c->status[1].step_index > 88u){
973  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index = %i/%i\n",
974  c->status[0].step_index, c->status[1].step_index);
975  return AVERROR_INVALIDDATA;
976  }
977  /* sign extend the predictors */
978  diff_channel = c->status[1].predictor;
979 
980  /* DK3 ADPCM support macro */
981 #define DK3_GET_NEXT_NIBBLE() \
982  if (decode_top_nibble_next) { \
983  nibble = last_byte >> 4; \
984  decode_top_nibble_next = 0; \
985  } else { \
986  last_byte = bytestream2_get_byteu(&gb); \
987  nibble = last_byte & 0x0F; \
988  decode_top_nibble_next = 1; \
989  }
990 
991  while (samples < samples_end) {
992 
993  /* for this algorithm, c->status[0] is the sum channel and
994  * c->status[1] is the diff channel */
995 
996  /* process the first predictor of the sum channel */
997  DK3_GET_NEXT_NIBBLE();
998  adpcm_ima_expand_nibble(&c->status[0], nibble, 3);
999 
1000  /* process the diff channel predictor */
1001  DK3_GET_NEXT_NIBBLE();
1002  adpcm_ima_expand_nibble(&c->status[1], nibble, 3);
1003 
1004  /* process the first pair of stereo PCM samples */
1005  diff_channel = (diff_channel + c->status[1].predictor) / 2;
1006  *samples++ = c->status[0].predictor + c->status[1].predictor;
1007  *samples++ = c->status[0].predictor - c->status[1].predictor;
1008 
1009  /* process the second predictor of the sum channel */
1010  DK3_GET_NEXT_NIBBLE();
1011  adpcm_ima_expand_nibble(&c->status[0], nibble, 3);
1012 
1013  /* process the second pair of stereo PCM samples */
1014  diff_channel = (diff_channel + c->status[1].predictor) / 2;
1015  *samples++ = c->status[0].predictor + c->status[1].predictor;
1016  *samples++ = c->status[0].predictor - c->status[1].predictor;
1017  }
1018 
1019  if ((bytestream2_tell(&gb) & 1))
1020  bytestream2_skip(&gb, 1);
1021  break;
1022  }
1024  for (channel = 0; channel < avctx->channels; channel++) {
1025  cs = &c->status[channel];
1026  cs->predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1027  cs->step_index = sign_extend(bytestream2_get_le16u(&gb), 16);
1028  if (cs->step_index > 88u){
1029  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
1030  channel, cs->step_index);
1031  return AVERROR_INVALIDDATA;
1032  }
1033  }
1034 
1035  for (n = nb_samples >> (1 - st); n > 0; n--) {
1036  int v1, v2;
1037  int v = bytestream2_get_byteu(&gb);
1038  /* nibbles are swapped for mono */
1039  if (st) {
1040  v1 = v >> 4;
1041  v2 = v & 0x0F;
1042  } else {
1043  v2 = v >> 4;
1044  v1 = v & 0x0F;
1045  }
1046  *samples++ = adpcm_ima_expand_nibble(&c->status[0 ], v1, 3);
1047  *samples++ = adpcm_ima_expand_nibble(&c->status[st], v2, 3);
1048  }
1049  break;
1051  for (channel = 0; channel < avctx->channels; channel++) {
1052  cs = &c->status[channel];
1053  samples = samples_p[channel];
1054  bytestream2_skip(&gb, 4);
1055  for (n = 0; n < nb_samples; n += 2) {
1056  int v = bytestream2_get_byteu(&gb);
1057  *samples++ = adpcm_ima_expand_nibble(cs, v >> 4 , 3);
1058  *samples++ = adpcm_ima_expand_nibble(cs, v & 0x0F, 3);
1059  }
1060  }
1061  break;
1063  while (bytestream2_get_bytes_left(&gb) > 0) {
1064  int v = bytestream2_get_byteu(&gb);
1065  *samples++ = adpcm_ima_expand_nibble(&c->status[0], v >> 4 , 3);
1066  *samples++ = adpcm_ima_expand_nibble(&c->status[st], v & 0x0F, 3);
1067  }
1068  break;
1070  while (bytestream2_get_bytes_left(&gb) > 0) {
1071  int v = bytestream2_get_byteu(&gb);
1072  *samples++ = adpcm_ima_oki_expand_nibble(&c->status[0], v >> 4 );
1073  *samples++ = adpcm_ima_oki_expand_nibble(&c->status[st], v & 0x0F);
1074  }
1075  break;
1077  for (channel = 0; channel < avctx->channels; channel++) {
1078  cs = &c->status[channel];
1079  cs->step_index = sign_extend(bytestream2_get_le16u(&gb), 16);
1080  cs->predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1081  if (cs->step_index > 88u){
1082  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
1083  channel, cs->step_index);
1084  return AVERROR_INVALIDDATA;
1085  }
1086  }
1087  for (n = 0; n < nb_samples / 2; n++) {
1088  int byte[2];
1089 
1090  byte[0] = bytestream2_get_byteu(&gb);
1091  if (st)
1092  byte[1] = bytestream2_get_byteu(&gb);
1093  for(channel = 0; channel < avctx->channels; channel++) {
1094  *samples++ = adpcm_ima_expand_nibble(&c->status[channel], byte[channel] & 0x0F, 3);
1095  }
1096  for(channel = 0; channel < avctx->channels; channel++) {
1097  *samples++ = adpcm_ima_expand_nibble(&c->status[channel], byte[channel] >> 4 , 3);
1098  }
1099  }
1100  break;
1102  if (c->vqa_version == 3) {
1103  for (channel = 0; channel < avctx->channels; channel++) {
1104  int16_t *smp = samples_p[channel];
1105 
1106  for (n = nb_samples / 2; n > 0; n--) {
1107  int v = bytestream2_get_byteu(&gb);
1108  *smp++ = adpcm_ima_expand_nibble(&c->status[channel], v >> 4 , 3);
1109  *smp++ = adpcm_ima_expand_nibble(&c->status[channel], v & 0x0F, 3);
1110  }
1111  }
1112  } else {
1113  for (n = nb_samples / 2; n > 0; n--) {
1114  for (channel = 0; channel < avctx->channels; channel++) {
1115  int v = bytestream2_get_byteu(&gb);
1116  *samples++ = adpcm_ima_expand_nibble(&c->status[channel], v >> 4 , 3);
1117  samples[st] = adpcm_ima_expand_nibble(&c->status[channel], v & 0x0F, 3);
1118  }
1119  samples += avctx->channels;
1120  }
1121  }
1122  bytestream2_seek(&gb, 0, SEEK_END);
1123  break;
1124  case AV_CODEC_ID_ADPCM_XA:
1125  {
1126  int16_t *out0 = samples_p[0];
1127  int16_t *out1 = samples_p[1];
1128  int samples_per_block = 28 * (3 - avctx->channels) * 4;
1129  int sample_offset = 0;
1130  int bytes_remaining;
1131  while (bytestream2_get_bytes_left(&gb) >= 128) {
1132  if ((ret = xa_decode(avctx, out0, out1, buf + bytestream2_tell(&gb),
1133  &c->status[0], &c->status[1],
1134  avctx->channels, sample_offset)) < 0)
1135  return ret;
1136  bytestream2_skipu(&gb, 128);
1137  sample_offset += samples_per_block;
1138  }
1139  /* Less than a full block of data left, e.g. when reading from
1140  * 2324 byte per sector XA; the remainder is padding */
1141  bytes_remaining = bytestream2_get_bytes_left(&gb);
1142  if (bytes_remaining > 0) {
1143  bytestream2_skip(&gb, bytes_remaining);
1144  }
1145  break;
1146  }
1148  for (i=0; i<=st; i++) {
1149  c->status[i].step_index = bytestream2_get_le32u(&gb);
1150  if (c->status[i].step_index > 88u) {
1151  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
1152  i, c->status[i].step_index);
1153  return AVERROR_INVALIDDATA;
1154  }
1155  }
1156  for (i=0; i<=st; i++) {
1157  c->status[i].predictor = bytestream2_get_le32u(&gb);
1158  if (FFABS((int64_t)c->status[i].predictor) > (1<<16))
1159  return AVERROR_INVALIDDATA;
1160  }
1161 
1162  for (n = nb_samples >> (1 - st); n > 0; n--) {
1163  int byte = bytestream2_get_byteu(&gb);
1164  *samples++ = adpcm_ima_expand_nibble(&c->status[0], byte >> 4, 3);
1165  *samples++ = adpcm_ima_expand_nibble(&c->status[st], byte & 0x0F, 3);
1166  }
1167  break;
1169  for (n = nb_samples >> (1 - st); n > 0; n--) {
1170  int byte = bytestream2_get_byteu(&gb);
1171  *samples++ = adpcm_ima_expand_nibble(&c->status[0], byte >> 4, 6);
1172  *samples++ = adpcm_ima_expand_nibble(&c->status[st], byte & 0x0F, 6);
1173  }
1174  break;
1175  case AV_CODEC_ID_ADPCM_EA:
1176  {
1177  int previous_left_sample, previous_right_sample;
1178  int current_left_sample, current_right_sample;
1179  int next_left_sample, next_right_sample;
1180  int coeff1l, coeff2l, coeff1r, coeff2r;
1181  int shift_left, shift_right;
1182 
1183  /* Each EA ADPCM frame has a 12-byte header followed by 30-byte pieces,
1184  each coding 28 stereo samples. */
1185 
1186  if(avctx->channels != 2)
1187  return AVERROR_INVALIDDATA;
1188 
1189  current_left_sample = sign_extend(bytestream2_get_le16u(&gb), 16);
1190  previous_left_sample = sign_extend(bytestream2_get_le16u(&gb), 16);
1191  current_right_sample = sign_extend(bytestream2_get_le16u(&gb), 16);
1192  previous_right_sample = sign_extend(bytestream2_get_le16u(&gb), 16);
1193 
1194  for (count1 = 0; count1 < nb_samples / 28; count1++) {
1195  int byte = bytestream2_get_byteu(&gb);
1196  coeff1l = ea_adpcm_table[ byte >> 4 ];
1197  coeff2l = ea_adpcm_table[(byte >> 4 ) + 4];
1198  coeff1r = ea_adpcm_table[ byte & 0x0F];
1199  coeff2r = ea_adpcm_table[(byte & 0x0F) + 4];
1200 
1201  byte = bytestream2_get_byteu(&gb);
1202  shift_left = 20 - (byte >> 4);
1203  shift_right = 20 - (byte & 0x0F);
1204 
1205  for (count2 = 0; count2 < 28; count2++) {
1206  byte = bytestream2_get_byteu(&gb);
1207  next_left_sample = sign_extend(byte >> 4, 4) * (1 << shift_left);
1208  next_right_sample = sign_extend(byte, 4) * (1 << shift_right);
1209 
1210  next_left_sample = (next_left_sample +
1211  (current_left_sample * coeff1l) +
1212  (previous_left_sample * coeff2l) + 0x80) >> 8;
1213  next_right_sample = (next_right_sample +
1214  (current_right_sample * coeff1r) +
1215  (previous_right_sample * coeff2r) + 0x80) >> 8;
1216 
1217  previous_left_sample = current_left_sample;
1218  current_left_sample = av_clip_int16(next_left_sample);
1219  previous_right_sample = current_right_sample;
1220  current_right_sample = av_clip_int16(next_right_sample);
1221  *samples++ = current_left_sample;
1222  *samples++ = current_right_sample;
1223  }
1224  }
1225 
1226  bytestream2_skip(&gb, 2); // Skip terminating 0x0000
1227 
1228  break;
1229  }
1231  {
1232  int coeff[2][2], shift[2];
1233 
1234  for(channel = 0; channel < avctx->channels; channel++) {
1235  int byte = bytestream2_get_byteu(&gb);
1236  for (i=0; i<2; i++)
1237  coeff[channel][i] = ea_adpcm_table[(byte >> 4) + 4*i];
1238  shift[channel] = 20 - (byte & 0x0F);
1239  }
1240  for (count1 = 0; count1 < nb_samples / 2; count1++) {
1241  int byte[2];
1242 
1243  byte[0] = bytestream2_get_byteu(&gb);
1244  if (st) byte[1] = bytestream2_get_byteu(&gb);
1245  for(i = 4; i >= 0; i-=4) { /* Pairwise samples LL RR (st) or LL LL (mono) */
1246  for(channel = 0; channel < avctx->channels; channel++) {
1247  int sample = sign_extend(byte[channel] >> i, 4) * (1 << shift[channel]);
1248  sample = (sample +
1249  c->status[channel].sample1 * coeff[channel][0] +
1250  c->status[channel].sample2 * coeff[channel][1] + 0x80) >> 8;
1252  c->status[channel].sample1 = av_clip_int16(sample);
1253  *samples++ = c->status[channel].sample1;
1254  }
1255  }
1256  }
1257  bytestream2_seek(&gb, 0, SEEK_END);
1258  break;
1259  }
1260  case AV_CODEC_ID_ADPCM_EA_R1:
1261  case AV_CODEC_ID_ADPCM_EA_R2:
1262  case AV_CODEC_ID_ADPCM_EA_R3: {
1263  /* channel numbering
1264  2chan: 0=fl, 1=fr
1265  4chan: 0=fl, 1=rl, 2=fr, 3=rr
1266  6chan: 0=fl, 1=c, 2=fr, 3=rl, 4=rr, 5=sub */
1267  const int big_endian = avctx->codec->id == AV_CODEC_ID_ADPCM_EA_R3;
1268  int previous_sample, current_sample, next_sample;
1269  int coeff1, coeff2;
1270  int shift;
1271  unsigned int channel;
1272  uint16_t *samplesC;
1273  int count = 0;
1274  int offsets[6];
1275 
1276  for (channel=0; channel<avctx->channels; channel++)
1277  offsets[channel] = (big_endian ? bytestream2_get_be32(&gb) :
1278  bytestream2_get_le32(&gb)) +
1279  (avctx->channels + 1) * 4;
1280 
1281  for (channel=0; channel<avctx->channels; channel++) {
1282  bytestream2_seek(&gb, offsets[channel], SEEK_SET);
1283  samplesC = samples_p[channel];
1284 
1285  if (avctx->codec->id == AV_CODEC_ID_ADPCM_EA_R1) {
1286  current_sample = sign_extend(bytestream2_get_le16(&gb), 16);
1287  previous_sample = sign_extend(bytestream2_get_le16(&gb), 16);
1288  } else {
1289  current_sample = c->status[channel].predictor;
1290  previous_sample = c->status[channel].prev_sample;
1291  }
1292 
1293  for (count1 = 0; count1 < nb_samples / 28; count1++) {
1294  int byte = bytestream2_get_byte(&gb);
1295  if (byte == 0xEE) { /* only seen in R2 and R3 */
1296  current_sample = sign_extend(bytestream2_get_be16(&gb), 16);
1297  previous_sample = sign_extend(bytestream2_get_be16(&gb), 16);
1298 
1299  for (count2=0; count2<28; count2++)
1300  *samplesC++ = sign_extend(bytestream2_get_be16(&gb), 16);
1301  } else {
1302  coeff1 = ea_adpcm_table[ byte >> 4 ];
1303  coeff2 = ea_adpcm_table[(byte >> 4) + 4];
1304  shift = 20 - (byte & 0x0F);
1305 
1306  for (count2=0; count2<28; count2++) {
1307  if (count2 & 1)
1308  next_sample = (unsigned)sign_extend(byte, 4) << shift;
1309  else {
1310  byte = bytestream2_get_byte(&gb);
1311  next_sample = (unsigned)sign_extend(byte >> 4, 4) << shift;
1312  }
1313 
1314  next_sample += (current_sample * coeff1) +
1315  (previous_sample * coeff2);
1316  next_sample = av_clip_int16(next_sample >> 8);
1317 
1318  previous_sample = current_sample;
1319  current_sample = next_sample;
1320  *samplesC++ = current_sample;
1321  }
1322  }
1323  }
1324  if (!count) {
1325  count = count1;
1326  } else if (count != count1) {
1327  av_log(avctx, AV_LOG_WARNING, "per-channel sample count mismatch\n");
1328  count = FFMAX(count, count1);
1329  }
1330 
1331  if (avctx->codec->id != AV_CODEC_ID_ADPCM_EA_R1) {
1332  c->status[channel].predictor = current_sample;
1333  c->status[channel].prev_sample = previous_sample;
1334  }
1335  }
1336 
1337  frame->nb_samples = count * 28;
1338  bytestream2_seek(&gb, 0, SEEK_END);
1339  break;
1340  }
1342  for (channel=0; channel<avctx->channels; channel++) {
1343  int coeff[2][4], shift[4];
1344  int16_t *s = samples_p[channel];
1345  for (n = 0; n < 4; n++, s += 32) {
1346  int val = sign_extend(bytestream2_get_le16u(&gb), 16);
1347  for (i=0; i<2; i++)
1348  coeff[i][n] = ea_adpcm_table[(val&0x0F)+4*i];
1349  s[0] = val & ~0x0F;
1350 
1351  val = sign_extend(bytestream2_get_le16u(&gb), 16);
1352  shift[n] = 20 - (val & 0x0F);
1353  s[1] = val & ~0x0F;
1354  }
1355 
1356  for (m=2; m<32; m+=2) {
1357  s = &samples_p[channel][m];
1358  for (n = 0; n < 4; n++, s += 32) {
1359  int level, pred;
1360  int byte = bytestream2_get_byteu(&gb);
1361 
1362  level = sign_extend(byte >> 4, 4) * (1 << shift[n]);
1363  pred = s[-1] * coeff[0][n] + s[-2] * coeff[1][n];
1364  s[0] = av_clip_int16((level + pred + 0x80) >> 8);
1365 
1366  level = sign_extend(byte, 4) * (1 << shift[n]);
1367  pred = s[0] * coeff[0][n] + s[-1] * coeff[1][n];
1368  s[1] = av_clip_int16((level + pred + 0x80) >> 8);
1369  }
1370  }
1371  }
1372  break;
1374  c->status[0].predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1375  c->status[0].step_index = bytestream2_get_byteu(&gb);
1376  bytestream2_skipu(&gb, 5);
1377  if (c->status[0].step_index > 88u) {
1378  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index = %i\n",
1379  c->status[0].step_index);
1380  return AVERROR_INVALIDDATA;
1381  }
1382 
1383  for (n = nb_samples >> (1 - st); n > 0; n--) {
1384  int v = bytestream2_get_byteu(&gb);
1385 
1386  *samples++ = adpcm_ima_expand_nibble(&c->status[0], v >> 4, 3);
1387  *samples++ = adpcm_ima_expand_nibble(&c->status[0], v & 0xf, 3);
1388  }
1389  break;
1391  for (i = 0; i < avctx->channels; i++) {
1392  c->status[i].predictor = sign_extend(bytestream2_get_be16u(&gb), 16);
1393  c->status[i].step_index = bytestream2_get_byteu(&gb);
1394  bytestream2_skipu(&gb, 1);
1395  if (c->status[i].step_index > 88u) {
1396  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index = %i\n",
1397  c->status[i].step_index);
1398  return AVERROR_INVALIDDATA;
1399  }
1400  }
1401 
1402  for (n = nb_samples >> (1 - st); n > 0; n--) {
1403  int v = bytestream2_get_byteu(&gb);
1404 
1405  *samples++ = adpcm_ima_qt_expand_nibble(&c->status[0 ], v >> 4, 3);
1406  *samples++ = adpcm_ima_qt_expand_nibble(&c->status[st], v & 0xf, 3);
1407  }
1408  break;
1409  case AV_CODEC_ID_ADPCM_CT:
1410  for (n = nb_samples >> (1 - st); n > 0; n--) {
1411  int v = bytestream2_get_byteu(&gb);
1412  *samples++ = adpcm_ct_expand_nibble(&c->status[0 ], v >> 4 );
1413  *samples++ = adpcm_ct_expand_nibble(&c->status[st], v & 0x0F);
1414  }
1415  break;
1419  if (!c->status[0].step_index) {
1420  /* the first byte is a raw sample */
1421  *samples++ = 128 * (bytestream2_get_byteu(&gb) - 0x80);
1422  if (st)
1423  *samples++ = 128 * (bytestream2_get_byteu(&gb) - 0x80);
1424  c->status[0].step_index = 1;
1425  nb_samples--;
1426  }
1427  if (avctx->codec->id == AV_CODEC_ID_ADPCM_SBPRO_4) {
1428  for (n = nb_samples >> (1 - st); n > 0; n--) {
1429  int byte = bytestream2_get_byteu(&gb);
1430  *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
1431  byte >> 4, 4, 0);
1432  *samples++ = adpcm_sbpro_expand_nibble(&c->status[st],
1433  byte & 0x0F, 4, 0);
1434  }
1435  } else if (avctx->codec->id == AV_CODEC_ID_ADPCM_SBPRO_3) {
1436  for (n = (nb_samples<<st) / 3; n > 0; n--) {
1437  int byte = bytestream2_get_byteu(&gb);
1438  *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
1439  byte >> 5 , 3, 0);
1440  *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
1441  (byte >> 2) & 0x07, 3, 0);
1442  *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
1443  byte & 0x03, 2, 0);
1444  }
1445  } else {
1446  for (n = nb_samples >> (2 - st); n > 0; n--) {
1447  int byte = bytestream2_get_byteu(&gb);
1448  *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
1449  byte >> 6 , 2, 2);
1450  *samples++ = adpcm_sbpro_expand_nibble(&c->status[st],
1451  (byte >> 4) & 0x03, 2, 2);
1452  *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
1453  (byte >> 2) & 0x03, 2, 2);
1454  *samples++ = adpcm_sbpro_expand_nibble(&c->status[st],
1455  byte & 0x03, 2, 2);
1456  }
1457  }
1458  break;
1459  case AV_CODEC_ID_ADPCM_SWF:
1460  adpcm_swf_decode(avctx, buf, buf_size, samples);
1461  bytestream2_seek(&gb, 0, SEEK_END);
1462  break;
1464  for (n = nb_samples >> (1 - st); n > 0; n--) {
1465  int v = bytestream2_get_byteu(&gb);
1466  *samples++ = adpcm_yamaha_expand_nibble(&c->status[0 ], v & 0x0F);
1467  *samples++ = adpcm_yamaha_expand_nibble(&c->status[st], v >> 4 );
1468  }
1469  break;
1471  if (!c->has_status) {
1472  for (channel = 0; channel < avctx->channels; channel++)
1473  c->status[channel].step = 0;
1474  c->has_status = 1;
1475  }
1476  for (channel = 0; channel < avctx->channels; channel++) {
1477  samples = samples_p[channel];
1478  for (n = nb_samples >> 1; n > 0; n--) {
1479  int v = bytestream2_get_byteu(&gb);
1480  *samples++ = adpcm_yamaha_expand_nibble(&c->status[channel], v & 0x0F);
1481  *samples++ = adpcm_yamaha_expand_nibble(&c->status[channel], v >> 4 );
1482  }
1483  }
1484  break;
1485  case AV_CODEC_ID_ADPCM_AFC:
1486  {
1487  int samples_per_block;
1488  int blocks;
1489 
1490  if (avctx->extradata && avctx->extradata_size == 1 && avctx->extradata[0]) {
1491  samples_per_block = avctx->extradata[0] / 16;
1492  blocks = nb_samples / avctx->extradata[0];
1493  } else {
1494  samples_per_block = nb_samples / 16;
1495  blocks = 1;
1496  }
1497 
1498  for (m = 0; m < blocks; m++) {
1499  for (channel = 0; channel < avctx->channels; channel++) {
1500  int prev1 = c->status[channel].sample1;
1501  int prev2 = c->status[channel].sample2;
1502 
1503  samples = samples_p[channel] + m * 16;
1504  /* Read in every sample for this channel. */
1505  for (i = 0; i < samples_per_block; i++) {
1506  int byte = bytestream2_get_byteu(&gb);
1507  int scale = 1 << (byte >> 4);
1508  int index = byte & 0xf;
1509  int factor1 = ff_adpcm_afc_coeffs[0][index];
1510  int factor2 = ff_adpcm_afc_coeffs[1][index];
1511 
1512  /* Decode 16 samples. */
1513  for (n = 0; n < 16; n++) {
1514  int32_t sampledat;
1515 
1516  if (n & 1) {
1517  sampledat = sign_extend(byte, 4);
1518  } else {
1519  byte = bytestream2_get_byteu(&gb);
1520  sampledat = sign_extend(byte >> 4, 4);
1521  }
1522 
1523  sampledat = ((prev1 * factor1 + prev2 * factor2) >> 11) +
1524  sampledat * scale;
1525  *samples = av_clip_int16(sampledat);
1526  prev2 = prev1;
1527  prev1 = *samples++;
1528  }
1529  }
1530 
1531  c->status[channel].sample1 = prev1;
1532  c->status[channel].sample2 = prev2;
1533  }
1534  }
1535  bytestream2_seek(&gb, 0, SEEK_END);
1536  break;
1537  }
1538  case AV_CODEC_ID_ADPCM_THP:
1540  {
1541  int table[14][16];
1542  int ch;
1543 
1544 #define THP_GET16(g) \
1545  sign_extend( \
1546  avctx->codec->id == AV_CODEC_ID_ADPCM_THP_LE ? \
1547  bytestream2_get_le16u(&(g)) : \
1548  bytestream2_get_be16u(&(g)), 16)
1549 
1550  if (avctx->extradata) {
1551  GetByteContext tb;
1552  if (avctx->extradata_size < 32 * avctx->channels) {
1553  av_log(avctx, AV_LOG_ERROR, "Missing coeff table\n");
1554  return AVERROR_INVALIDDATA;
1555  }
1556 
1557  bytestream2_init(&tb, avctx->extradata, avctx->extradata_size);
1558  for (i = 0; i < avctx->channels; i++)
1559  for (n = 0; n < 16; n++)
1560  table[i][n] = THP_GET16(tb);
1561  } else {
1562  for (i = 0; i < avctx->channels; i++)
1563  for (n = 0; n < 16; n++)
1564  table[i][n] = THP_GET16(gb);
1565 
1566  if (!c->has_status) {
1567  /* Initialize the previous sample. */
1568  for (i = 0; i < avctx->channels; i++) {
1569  c->status[i].sample1 = THP_GET16(gb);
1570  c->status[i].sample2 = THP_GET16(gb);
1571  }
1572  c->has_status = 1;
1573  } else {
1574  bytestream2_skip(&gb, avctx->channels * 4);
1575  }
1576  }
1577 
1578  for (ch = 0; ch < avctx->channels; ch++) {
1579  samples = samples_p[ch];
1580 
1581  /* Read in every sample for this channel. */
1582  for (i = 0; i < (nb_samples + 13) / 14; i++) {
1583  int byte = bytestream2_get_byteu(&gb);
1584  int index = (byte >> 4) & 7;
1585  unsigned int exp = byte & 0x0F;
1586  int64_t factor1 = table[ch][index * 2];
1587  int64_t factor2 = table[ch][index * 2 + 1];
1588 
1589  /* Decode 14 samples. */
1590  for (n = 0; n < 14 && (i * 14 + n < nb_samples); n++) {
1591  int32_t sampledat;
1592 
1593  if (n & 1) {
1594  sampledat = sign_extend(byte, 4);
1595  } else {
1596  byte = bytestream2_get_byteu(&gb);
1597  sampledat = sign_extend(byte >> 4, 4);
1598  }
1599 
1600  sampledat = ((c->status[ch].sample1 * factor1
1601  + c->status[ch].sample2 * factor2) >> 11) + sampledat * (1 << exp);
1602  *samples = av_clip_int16(sampledat);
1603  c->status[ch].sample2 = c->status[ch].sample1;
1604  c->status[ch].sample1 = *samples++;
1605  }
1606  }
1607  }
1608  break;
1609  }
1610  case AV_CODEC_ID_ADPCM_DTK:
1611  for (channel = 0; channel < avctx->channels; channel++) {
1612  samples = samples_p[channel];
1613 
1614  /* Read in every sample for this channel. */
1615  for (i = 0; i < nb_samples / 28; i++) {
1616  int byte, header;
1617  if (channel)
1618  bytestream2_skipu(&gb, 1);
1619  header = bytestream2_get_byteu(&gb);
1620  bytestream2_skipu(&gb, 3 - channel);
1621 
1622  /* Decode 28 samples. */
1623  for (n = 0; n < 28; n++) {
1624  int32_t sampledat, prev;
1625 
1626  switch (header >> 4) {
1627  case 1:
1628  prev = (c->status[channel].sample1 * 0x3c);
1629  break;
1630  case 2:
1631  prev = (c->status[channel].sample1 * 0x73) - (c->status[channel].sample2 * 0x34);
1632  break;
1633  case 3:
1634  prev = (c->status[channel].sample1 * 0x62) - (c->status[channel].sample2 * 0x37);
1635  break;
1636  default:
1637  prev = 0;
1638  }
1639 
1640  prev = av_clip_intp2((prev + 0x20) >> 6, 21);
1641 
1642  byte = bytestream2_get_byteu(&gb);
1643  if (!channel)
1644  sampledat = sign_extend(byte, 4);
1645  else
1646  sampledat = sign_extend(byte >> 4, 4);
1647 
1648  sampledat = ((sampledat * (1 << 12)) >> (header & 0xf)) * (1 << 6) + prev;
1649  *samples++ = av_clip_int16(sampledat >> 6);
1650  c->status[channel].sample2 = c->status[channel].sample1;
1651  c->status[channel].sample1 = sampledat;
1652  }
1653  }
1654  if (!channel)
1655  bytestream2_seek(&gb, 0, SEEK_SET);
1656  }
1657  break;
1658  case AV_CODEC_ID_ADPCM_PSX:
1659  for (channel = 0; channel < avctx->channels; channel++) {
1660  samples = samples_p[channel];
1661 
1662  /* Read in every sample for this channel. */
1663  for (i = 0; i < nb_samples / 28; i++) {
1664  int filter, shift, flag, byte;
1665 
1666  filter = bytestream2_get_byteu(&gb);
1667  shift = filter & 0xf;
1668  filter = filter >> 4;
1669  if (filter >= FF_ARRAY_ELEMS(xa_adpcm_table))
1670  return AVERROR_INVALIDDATA;
1671  flag = bytestream2_get_byteu(&gb);
1672 
1673  /* Decode 28 samples. */
1674  for (n = 0; n < 28; n++) {
1675  int sample = 0, scale;
1676 
1677  if (flag < 0x07) {
1678  if (n & 1) {
1679  scale = sign_extend(byte >> 4, 4);
1680  } else {
1681  byte = bytestream2_get_byteu(&gb);
1682  scale = sign_extend(byte, 4);
1683  }
1684 
1685  scale = scale * (1 << 12);
1686  sample = (int)((scale >> shift) + (c->status[channel].sample1 * xa_adpcm_table[filter][0] + c->status[channel].sample2 * xa_adpcm_table[filter][1]) / 64);
1687  }
1688  *samples++ = av_clip_int16(sample);
1689  c->status[channel].sample2 = c->status[channel].sample1;
1690  c->status[channel].sample1 = sample;
1691  }
1692  }
1693  }
1694  break;
1695 
1696  default:
1697  return -1;
1698  }
1699 
1700  if (avpkt->size && bytestream2_tell(&gb) == 0) {
1701  av_log(avctx, AV_LOG_ERROR, "Nothing consumed\n");
1702  return AVERROR_INVALIDDATA;
1703  }
1704 
1705  *got_frame_ptr = 1;
1706 
1707  if (avpkt->size < bytestream2_tell(&gb)) {
1708  av_log(avctx, AV_LOG_ERROR, "Overread of %d < %d\n", avpkt->size, bytestream2_tell(&gb));
1709  return avpkt->size;
1710  }
1711 
1712  return bytestream2_tell(&gb);
1713 }
1714 
1715 static void adpcm_flush(AVCodecContext *avctx)
1716 {
1717  ADPCMDecodeContext *c = avctx->priv_data;
1718  c->has_status = 0;
1719 }
1720 
1721 
1722 static const enum AVSampleFormat sample_fmts_s16[] = { AV_SAMPLE_FMT_S16,
1723  AV_SAMPLE_FMT_NONE };
1724 static const enum AVSampleFormat sample_fmts_s16p[] = { AV_SAMPLE_FMT_S16P,
1725  AV_SAMPLE_FMT_NONE };
1726 static const enum AVSampleFormat sample_fmts_both[] = { AV_SAMPLE_FMT_S16,
1727  AV_SAMPLE_FMT_S16P,
1728  AV_SAMPLE_FMT_NONE };
1729 
1730 #define ADPCM_DECODER(id_, sample_fmts_, name_, long_name_) \
1731 AVCodec ff_ ## name_ ## _decoder = { \
1732  .name = #name_, \
1733  .long_name = NULL_IF_CONFIG_SMALL(long_name_), \
1734  .type = AVMEDIA_TYPE_AUDIO, \
1735  .id = id_, \
1736  .priv_data_size = sizeof(ADPCMDecodeContext), \
1737  .init = adpcm_decode_init, \
1738  .decode = adpcm_decode_frame, \
1739  .flush = adpcm_flush, \
1740  .capabilities = AV_CODEC_CAP_DR1, \
1741  .sample_fmts = sample_fmts_, \
1742 }
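/* For example, the ADPCM_DECODER(AV_CODEC_ID_ADPCM_MS, ...) entry below
 * expands to an AVCodec named ff_adpcm_ms_decoder with .name = "adpcm_ms",
 * the shared init/decode/flush callbacks above, and the given sample format
 * list. */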
1743 
1744 /* Note: Do not forget to add new entries to the Makefile as well. */
1745 ADPCM_DECODER(AV_CODEC_ID_ADPCM_4XM, sample_fmts_s16p, adpcm_4xm, "ADPCM 4X Movie");
1746 ADPCM_DECODER(AV_CODEC_ID_ADPCM_AFC, sample_fmts_s16p, adpcm_afc, "ADPCM Nintendo Gamecube AFC");
1747 ADPCM_DECODER(AV_CODEC_ID_ADPCM_AICA, sample_fmts_s16p, adpcm_aica, "ADPCM Yamaha AICA");
1748 ADPCM_DECODER(AV_CODEC_ID_ADPCM_CT, sample_fmts_s16, adpcm_ct, "ADPCM Creative Technology");
1749 ADPCM_DECODER(AV_CODEC_ID_ADPCM_DTK, sample_fmts_s16p, adpcm_dtk, "ADPCM Nintendo Gamecube DTK");
1750 ADPCM_DECODER(AV_CODEC_ID_ADPCM_EA, sample_fmts_s16, adpcm_ea, "ADPCM Electronic Arts");
1751 ADPCM_DECODER(AV_CODEC_ID_ADPCM_EA_MAXIS_XA, sample_fmts_s16, adpcm_ea_maxis_xa, "ADPCM Electronic Arts Maxis CDROM XA");
1752 ADPCM_DECODER(AV_CODEC_ID_ADPCM_EA_R1, sample_fmts_s16p, adpcm_ea_r1, "ADPCM Electronic Arts R1");
1753 ADPCM_DECODER(AV_CODEC_ID_ADPCM_EA_R2, sample_fmts_s16p, adpcm_ea_r2, "ADPCM Electronic Arts R2");
1754 ADPCM_DECODER(AV_CODEC_ID_ADPCM_EA_R3, sample_fmts_s16p, adpcm_ea_r3, "ADPCM Electronic Arts R3");
1755 ADPCM_DECODER(AV_CODEC_ID_ADPCM_EA_XAS, sample_fmts_s16p, adpcm_ea_xas, "ADPCM Electronic Arts XAS");
1756 ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_AMV, sample_fmts_s16, adpcm_ima_amv, "ADPCM IMA AMV");
1757 ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_APC, sample_fmts_s16, adpcm_ima_apc, "ADPCM IMA CRYO APC");
1758 ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_DAT4, sample_fmts_s16, adpcm_ima_dat4, "ADPCM IMA Eurocom DAT4");
1759 ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_DK3, sample_fmts_s16, adpcm_ima_dk3, "ADPCM IMA Duck DK3");
1760 ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_DK4, sample_fmts_s16, adpcm_ima_dk4, "ADPCM IMA Duck DK4");
1761 ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_EA_EACS, sample_fmts_s16, adpcm_ima_ea_eacs, "ADPCM IMA Electronic Arts EACS");
1762 ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_EA_SEAD, sample_fmts_s16, adpcm_ima_ea_sead, "ADPCM IMA Electronic Arts SEAD");
1763 ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_ISS, sample_fmts_s16, adpcm_ima_iss, "ADPCM IMA Funcom ISS");
1764 ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_OKI, sample_fmts_s16, adpcm_ima_oki, "ADPCM IMA Dialogic OKI");
1765 ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_QT, sample_fmts_s16p, adpcm_ima_qt, "ADPCM IMA QuickTime");
1766 ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_RAD, sample_fmts_s16, adpcm_ima_rad, "ADPCM IMA Radical");
1767 ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_SMJPEG, sample_fmts_s16, adpcm_ima_smjpeg, "ADPCM IMA Loki SDL MJPEG");
1768 ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_WAV, sample_fmts_s16p, adpcm_ima_wav, "ADPCM IMA WAV");
1769 ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_WS, sample_fmts_both, adpcm_ima_ws, "ADPCM IMA Westwood");
1770 ADPCM_DECODER(AV_CODEC_ID_ADPCM_MS, sample_fmts_s16, adpcm_ms, "ADPCM Microsoft");
1771 ADPCM_DECODER(AV_CODEC_ID_ADPCM_MTAF, sample_fmts_s16p, adpcm_mtaf, "ADPCM MTAF");
1772 ADPCM_DECODER(AV_CODEC_ID_ADPCM_PSX, sample_fmts_s16p, adpcm_psx, "ADPCM Playstation");
1773 ADPCM_DECODER(AV_CODEC_ID_ADPCM_SBPRO_2, sample_fmts_s16, adpcm_sbpro_2, "ADPCM Sound Blaster Pro 2-bit");
1774 ADPCM_DECODER(AV_CODEC_ID_ADPCM_SBPRO_3, sample_fmts_s16, adpcm_sbpro_3, "ADPCM Sound Blaster Pro 2.6-bit");
1775 ADPCM_DECODER(AV_CODEC_ID_ADPCM_SBPRO_4, sample_fmts_s16, adpcm_sbpro_4, "ADPCM Sound Blaster Pro 4-bit");
1776 ADPCM_DECODER(AV_CODEC_ID_ADPCM_SWF, sample_fmts_s16, adpcm_swf, "ADPCM Shockwave Flash");
1777 ADPCM_DECODER(AV_CODEC_ID_ADPCM_THP_LE, sample_fmts_s16p, adpcm_thp_le, "ADPCM Nintendo THP (little-endian)");
1778 ADPCM_DECODER(AV_CODEC_ID_ADPCM_THP, sample_fmts_s16p, adpcm_thp, "ADPCM Nintendo THP");
1779 ADPCM_DECODER(AV_CODEC_ID_ADPCM_XA, sample_fmts_s16p, adpcm_xa, "ADPCM CDROM XA");
1780 ADPCM_DECODER(AV_CODEC_ID_ADPCM_YAMAHA, sample_fmts_s16, adpcm_yamaha, "ADPCM Yamaha");
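For reference, each ADPCM_DECODER() line above expands to an ordinary AVCodec table entry; the AV_CODEC_ID_ADPCM_MS registration, for example, is equivalent to the hand-written form below (shown expanded purely for illustration, it is not an additional definition in this file):

AVCodec ff_adpcm_ms_decoder = {
    .name           = "adpcm_ms",
    .long_name      = NULL_IF_CONFIG_SMALL("ADPCM Microsoft"),
    .type           = AVMEDIA_TYPE_AUDIO,
    .id             = AV_CODEC_ID_ADPCM_MS,
    .priv_data_size = sizeof(ADPCMDecodeContext),
    .init           = adpcm_decode_init,
    .decode         = adpcm_decode_frame,
    .flush          = adpcm_flush,
    .capabilities   = AV_CODEC_CAP_DR1,
    .sample_fmts    = sample_fmts_s16,
};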