Lines matching refs:gr (every reference to the symbol gr; judging by the identifiers reduce_bit_usage, block_sf, and outOfBitsStrategy, the file appears to be LAME's libmp3lame/vbrquantize.c)
592 ol_sf -= ifqstep*scalefac[gr][ch].s[sfb][i];
684 ol_sf -= ifqstep*scalefac[gr][ch].l[sfb];
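The first two matches (file lines 592 and 684) are from the short- and long-block scalefactor helpers: each subtracts ifqstep quantizer steps per stored scalefactor from the running step size ol_sf. A minimal sketch, assuming ifqstep follows the usual MP3 convention of 2 or 4 steps depending on scalefac_scale (the helper name below is illustrative, not LAME's):

    /* Hypothetical helper: strip the part of the step size that a stored
     * scalefactor already expresses.  One unit of ol_sf is a 2^(1/4)
     * amplitude step, so scalefac_scale == 0 makes each scalefactor worth
     * 2 units (sqrt 2) and scalefac_scale == 1 worth 4 units (factor 2). */
    static int strip_scalefac(int ol_sf, int sf_value, int scalefac_scale)
    {
        int const ifqstep = (scalefac_scale == 0) ? 2 : 4;
        return ol_sf - ifqstep * sf_value;
    }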
1232 reduce_bit_usage(lame_internal_flags * gfc, int gr, int ch
1239 gr_info *const cod_info = &gfc->l3_side.tt[gr][ch];
1242 best_scalefac_store(gfc, gr, ch, &gfc->l3_side);
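The match at file line 1232 is cut off mid-signature because the declaration itself continues on the next source line, so the missing parenthesis is not an extraction error. From the neighbouring matches, reduce_bit_usage re-optimizes the scalefactor storage for one granule/channel and reports the bits actually consumed. A hedged reconstruction of its shape; the use_best_huffman branch and the return expression are guesses at the elided lines, not confirmed by this listing, and the body presupposes LAME's internal headers:

    static int
    reduce_bit_usage(lame_internal_flags * gfc, int gr, int ch)
    {
        gr_info *const cod_info = &gfc->l3_side.tt[gr][ch];
        /* try a cheaper scalefactor storage for this granule/channel */
        best_scalefac_store(gfc, gr, ch, &gfc->l3_side);
        /* assumption: an optional best-huffman table split saves more bits */
        if (gfc->cfg.use_best_huffman == 1)
            best_huffman_divide(gfc, cod_info);
        /* bits consumed: scalefactors (part2) plus huffman data (part2_3) */
        return cod_info->part2_3_length + cod_info->part2_length;
    }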
1271 int gr, ch;
1276 for (gr = 0; gr < ngr; ++gr) {
1277 max_nbits_gr[gr] = 0;
1279 max_nbits_ch[gr][ch] = max_bits[gr][ch];
1280 use_nbits_ch[gr][ch] = 0;
1281 max_nbits_gr[gr] += max_bits[gr][ch];
1282 max_nbits_fr += max_bits[gr][ch];
1283 that_[gr][ch].find = (cfg->full_outer_loop < 0) ? guess_scalefac_x34 : find_scalefac_x34;
1284 that_[gr][ch].gfc = gfc;
1285 that_[gr][ch].cod_info = &gfc->l3_side.tt[gr][ch];
1286 that_[gr][ch].xr34orig = xr34orig[gr][ch];
1287 if (that_[gr][ch].cod_info->block_type == SHORT_TYPE) {
1288 that_[gr][ch].alloc = short_block_constrain;
1291 that_[gr][ch].alloc = long_block_constrain;
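File lines 1271-1291 are the setup pass: per granule and channel the bit ceilings are copied from max_bits, granule and frame totals are accumulated, and a work descriptor (that_) is filled with function pointers, namely the scalefactor search (a cheap guess when cfg->full_outer_loop is negative, the full search otherwise) and the block-type-specific constrain routine. A compilable sketch of the same dispatch idiom (all names hypothetical):

    #include <stdio.h>

    typedef int (*find_fn)(int target);

    static int guess_fast(int t)   { return t - 1; } /* stand-in for guess_scalefac_x34 */
    static int search_exact(int t) { return t; }     /* stand-in for find_scalefac_x34  */

    int main(void)
    {
        int const full_outer_loop = -1;  /* negative: take the fast path */
        find_fn const find = (full_outer_loop < 0) ? guess_fast : search_exact;
        printf("find(10) = %d\n", find(10));
        return 0;
    }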
1297 for (gr = 0; gr < ngr; ++gr) {
1299 if (max_bits[gr][ch] > 0) {
1300 algo_t *that = &that_[gr][ch];
1301 int *sfwork = sfwork_[gr][ch];
1302 int *vbrsfmin = vbrsfmin_[gr][ch];
1305 vbrmax = block_sf(that, l3_xmin[gr][ch], sfwork, vbrsfmin);
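File line 1305 is the first real quantization step: block_sf evidently searches each scalefactor band for the smallest scalefactor whose noise stays below the allowed distortion l3_xmin, filling sfwork and the per-band floor vbrsfmin, and returns the largest value found (vbrmax) to seed the granule-wide step size. A tiny sketch of that return-the-maximum contract (hypothetical helper):

    /* Hypothetical sketch: a per-band search fills sf[0..n), and the
     * caller needs the maximum to seed the granule-wide step size. */
    static int max_scalefac(const int *sf, int n)
    {
        int m = sf[0];
        for (int i = 1; i < n; ++i)
            if (sf[i] > m)
                m = sf[i];
        return m;
    }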
1320 for (gr = 0; gr < ngr; ++gr) {
1321 use_nbits_gr[gr] = 0;
1323 algo_t const *that = &that_[gr][ch];
1324 if (max_bits[gr][ch] > 0) {
1334 use_nbits_ch[gr][ch] = reduce_bit_usage(gfc, gr, ch);
1335 use_nbits_gr[gr] += use_nbits_ch[gr][ch];
1337 use_nbits_fr += use_nbits_gr[gr];
1344 for (gr = 0; gr < ngr; ++gr) {
1345 if (use_nbits_gr[gr] > MAX_BITS_PER_GRANULE) {
1352 if (use_nbits_ch[gr][ch] > MAX_BITS_PER_CHANNEL) {
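The totals are then checked against the format's hard ceilings. In LAME these constants are, as far as I know, MAX_BITS_PER_GRANULE = 7680 and MAX_BITS_PER_CHANNEL = 4095 (part2_3_length is a 12-bit side-info field, so 4095 is its largest value). A worked check under that assumption shows why both tests are needed:

    #include <assert.h>

    /* Assumed values of the LAME limits, not restated in this listing. */
    enum { DEMO_MAX_CH = 4095, DEMO_MAX_GR = 7680 };

    int main(void)
    {
        /* two channels at the per-channel cap still bust the granule cap,
         * so neither check implies the other */
        assert(2 * DEMO_MAX_CH > DEMO_MAX_GR);
        assert(DEMO_MAX_CH < DEMO_MAX_GR);
        return 0;
    }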
1376 for (gr = 0; gr < ngr; ++gr) {
1377 max_nbits_gr[gr] = 0;
1379 if (use_nbits_ch[gr][ch] > MAX_BITS_PER_CHANNEL) {
1380 max_nbits_ch[gr][ch] = MAX_BITS_PER_CHANNEL;
1383 max_nbits_ch[gr][ch] = use_nbits_ch[gr][ch];
1385 max_nbits_gr[gr] += max_nbits_ch[gr][ch];
1387 if (max_nbits_gr[gr] > MAX_BITS_PER_GRANULE) {
1390 if (max_nbits_ch[gr][ch] > 0) {
1391 f[ch] = sqrt(sqrt(max_nbits_ch[gr][ch]));
1400 max_nbits_ch[gr][ch] = MAX_BITS_PER_GRANULE * f[ch] / s;
1403 max_nbits_ch[gr][ch] = 0;
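When a granule overruns MAX_BITS_PER_GRANULE (file lines 1387-1403), its cap is re-divided among the channels in proportion to the fourth root of their current ceilings, so the greedier channel is trimmed harder than a linear share would trim it. A compilable sketch of that redistribution, with the granule cap assumed to be 7680:

    #include <math.h>
    #include <stdio.h>

    /* Sketch of the fourth-root redistribution above: channel shares go
     * as pow(bits, 1/4), rescaled to the granule cap. */
    enum { DEMO_MAX_GR = 7680 };

    int main(void)
    {
        int    nbits[2] = { 6000, 3000 };  /* 9000 bits: over the cap */
        double f[2], s = 0.0;
        for (int ch = 0; ch < 2; ++ch) {
            f[ch] = nbits[ch] > 0 ? sqrt(sqrt((double) nbits[ch])) : 0.0;
            s += f[ch];
        }
        for (int ch = 0; ch < 2; ++ch)
            nbits[ch] = s > 0.0 ? (int) (DEMO_MAX_GR * f[ch] / s) : 0;
        printf("%d %d\n", nbits[0], nbits[1]); /* 4171 3508: flatter than 2:1 */
        return 0;
    }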
1407 if (max_nbits_ch[gr][0] > use_nbits_ch[gr][0] + 32) {
1408 max_nbits_ch[gr][1] += max_nbits_ch[gr][0];
1409 max_nbits_ch[gr][1] -= use_nbits_ch[gr][0] + 32;
1410 max_nbits_ch[gr][0] = use_nbits_ch[gr][0] + 32;
1412 if (max_nbits_ch[gr][1] > use_nbits_ch[gr][1] + 32) {
1413 max_nbits_ch[gr][0] += max_nbits_ch[gr][1];
1414 max_nbits_ch[gr][0] -= use_nbits_ch[gr][1] + 32;
1415 max_nbits_ch[gr][1] = use_nbits_ch[gr][1] + 32;
1417 if (max_nbits_ch[gr][0] > MAX_BITS_PER_CHANNEL) {
1418 max_nbits_ch[gr][0] = MAX_BITS_PER_CHANNEL;
1420 if (max_nbits_ch[gr][1] > MAX_BITS_PER_CHANNEL) {
1421 max_nbits_ch[gr][1] = MAX_BITS_PER_CHANNEL;
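File lines 1407-1421 hand any slack in one channel (beyond what it actually used plus 32 bits of headroom) to the other channel, then clamp both to MAX_BITS_PER_CHANNEL. A sketch of the hand-off; the 32-bit margin is kept verbatim from the source, whose rationale for that exact figure is not visible in this listing:

    /* Sketch of the surplus hand-off: ch 0 donates first, then ch 1,
     * matching the order in the source. */
    static void donate_surplus(int max_ch[2], const int use_ch[2])
    {
        for (int ch = 0; ch < 2; ++ch) {
            int const other = 1 - ch;
            int const keep  = use_ch[ch] + 32;  /* used bits + headroom */
            if (max_ch[ch] > keep) {
                max_ch[other] += max_ch[ch] - keep;
                max_ch[ch] = keep;
            }
        }
    }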
1424 max_nbits_gr[gr] = 0;
1426 max_nbits_gr[gr] += max_nbits_ch[gr][ch];
1429 sum_fr += max_nbits_gr[gr];
1434 for (gr = 0; gr < ngr; ++gr) {
1435 if (max_nbits_gr[gr] > 0) {
1436 f[gr] = sqrt(max_nbits_gr[gr]);
1437 s += f[gr];
1440 f[gr] = 0;
1443 for (gr = 0; gr < ngr; ++gr) {
1445 max_nbits_gr[gr] = max_nbits_fr * f[gr] / s;
1448 max_nbits_gr[gr] = 0;
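If the frame as a whole is still over budget (file lines 1434-1448), the same trick is applied one level up: granule ceilings are re-divided in proportion to the square root of their current values and rescaled to the frame budget max_nbits_fr. A sketch (an MPEG-1 frame has two granules, MPEG-2 one):

    #include <math.h>

    /* Sketch of the frame-level redistribution: granule shares go as
     * sqrt(bits), rescaled to the frame budget. */
    static void split_frame_budget(int nbits_gr[], int ngr, int budget)
    {
        double f[2] = { 0.0, 0.0 }, s = 0.0;  /* ngr is at most 2 */
        for (int gr = 0; gr < ngr; ++gr) {
            f[gr] = nbits_gr[gr] > 0 ? sqrt((double) nbits_gr[gr]) : 0.0;
            s += f[gr];
        }
        for (int gr = 0; gr < ngr; ++gr)
            nbits_gr[gr] = s > 0.0 ? (int) (budget * f[gr] / s) : 0;
    }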
1463 for (gr = 0; gr < ngr; ++gr) {
1464 if (max_nbits_gr[gr] > MAX_BITS_PER_GRANULE) {
1465 max_nbits_gr[gr] = MAX_BITS_PER_GRANULE;
1469 for (gr = 0; gr < ngr; ++gr) {
1472 if (max_nbits_ch[gr][ch] > 0) {
1473 f[ch] = sqrt(max_nbits_ch[gr][ch]);
1482 max_nbits_ch[gr][ch] = max_nbits_gr[gr] * f[ch] / s;
1485 max_nbits_ch[gr][ch] = 0;
1489 if (max_nbits_ch[gr][0] > use_nbits_ch[gr][0] + 32) {
1490 max_nbits_ch[gr][1] += max_nbits_ch[gr][0];
1491 max_nbits_ch[gr][1] -= use_nbits_ch[gr][0] + 32;
1492 max_nbits_ch[gr][0] = use_nbits_ch[gr][0] + 32;
1494 if (max_nbits_ch[gr][1] > use_nbits_ch[gr][1] + 32) {
1495 max_nbits_ch[gr][0] += max_nbits_ch[gr][1];
1496 max_nbits_ch[gr][0] -= use_nbits_ch[gr][1] + 32;
1497 max_nbits_ch[gr][1] = use_nbits_ch[gr][1] + 32;
1500 if (max_nbits_ch[gr][ch] > MAX_BITS_PER_CHANNEL) {
1501 max_nbits_ch[gr][ch] = MAX_BITS_PER_CHANNEL;
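File lines 1463-1501 then clamp each granule to MAX_BITS_PER_GRANULE and split its budget over the channels once more, this time with plain square-root weights rather than the fourth root, before repeating the 32-bit hand-off and the per-channel clamp. A worked comparison of the two weightings with the same 6000/3000 example: sqrt yields 4499/3181 against the fourth root's 4172/3508, so sqrt preserves more of the original imbalance.

    #include <math.h>
    #include <stdio.h>

    int main(void)
    {
        double const a = 6000.0, b = 3000.0, cap = 7680.0;
        double const r2 = sqrt(a) + sqrt(b);
        double const r4 = sqrt(sqrt(a)) + sqrt(sqrt(b));
        /* sqrt keeps more of the 2:1 imbalance than the fourth root */
        printf("sqrt  split: %.0f / %.0f\n",
               cap * sqrt(a) / r2, cap * sqrt(b) / r2);
        printf("4root split: %.0f / %.0f\n",
               cap * sqrt(sqrt(a)) / r4, cap * sqrt(sqrt(b)) / r4);
        return 0;
    }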
1509 for (gr = 0; gr < ngr; ++gr) {
1512 sum_gr += max_nbits_ch[gr][ch];
1513 if (max_nbits_ch[gr][ch] > MAX_BITS_PER_CHANNEL) {
1527 for (gr = 0; gr < ngr; ++gr) {
1529 max_nbits_ch[gr][ch] = max_bits[gr][ch];
1544 for (gr = 0; gr < ngr; ++gr) {
1546 gfc->l3_side.tt[gr][ch].scalefac_compress = 0;
1553 for (gr = 0; gr < ngr; ++gr) {
1554 use_nbits_gr[gr] = 0;
1556 algo_t const *that = &that_[gr][ch];
1557 use_nbits_ch[gr][ch] = 0;
1558 if (max_bits[gr][ch] > 0) {
1559 int *sfwork = sfwork_[gr][ch];
1560 int const *vbrsfmin = vbrsfmin_[gr][ch];
1562 outOfBitsStrategy(that, sfwork, vbrsfmin, max_nbits_ch[gr][ch]);
1564 use_nbits_ch[gr][ch] = reduce_bit_usage(gfc, gr, ch);
1565 assert(use_nbits_ch[gr][ch] <= max_nbits_ch[gr][ch]);
1566 use_nbits_gr[gr] += use_nbits_ch[gr][ch];
1568 use_nbits_fr += use_nbits_gr[gr];
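The final pass (file lines 1544-1568) resets scalefac_compress, re-quantizes every granule/channel via outOfBitsStrategy until the coded size honours the repaired ceiling, re-optimizes storage with reduce_bit_usage, and asserts that the budget holds. A hypothetical sketch of that shrink-until-it-fits pattern, not LAME's actual loop:

    #include <assert.h>

    typedef int (*coded_bits_fn)(int step);

    /* Coarsen the quantizer step until the coded size fits; assumes the
     * size is non-increasing in step and that some step always fits. */
    static int fit_to_budget(coded_bits_fn coded_bits, int step, int budget)
    {
        while (coded_bits(step) > budget)
            ++step;
        assert(coded_bits(step) <= budget);
        return step;
    }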