/*
* Copyright (c) 2010 The WebM project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include <limits.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include "./vp9_rtcd.h"
#include "./vpx_config.h"
#include "./vpx_dsp_rtcd.h"
#include "./vpx_scale_rtcd.h"
#include "vpx_dsp/psnr.h"
#include "vpx_dsp/vpx_dsp_common.h"
#include "vpx_dsp/vpx_filter.h"
#if CONFIG_INTERNAL_STATS
#include "vpx_dsp/ssim.h"
#endif
#include "vpx_ports/mem.h"
#include "vpx_ports/system_state.h"
#include "vpx_ports/vpx_timer.h"
#if CONFIG_BITSTREAM_DEBUG || CONFIG_MISMATCH_DEBUG
#include "vpx_util/vpx_debug_util.h"
#endif // CONFIG_BITSTREAM_DEBUG || CONFIG_MISMATCH_DEBUG
#include "vp9/common/vp9_alloccommon.h"
#include "vp9/common/vp9_filter.h"
#include "vp9/common/vp9_idct.h"
#if CONFIG_NON_GREEDY_MV
#include "vp9/common/vp9_mvref_common.h"
#endif
#if CONFIG_VP9_POSTPROC
#include "vp9/common/vp9_postproc.h"
#endif
#include "vp9/common/vp9_reconinter.h"
#include "vp9/common/vp9_reconintra.h"
#include "vp9/common/vp9_tile_common.h"
#include "vp9/common/vp9_scan.h"
#if !CONFIG_REALTIME_ONLY
#include "vp9/encoder/vp9_alt_ref_aq.h"
#include "vp9/encoder/vp9_aq_360.h"
#include "vp9/encoder/vp9_aq_complexity.h"
#endif
#include "vp9/encoder/vp9_aq_cyclicrefresh.h"
#if !CONFIG_REALTIME_ONLY
#include "vp9/encoder/vp9_aq_variance.h"
#endif
#include "vp9/encoder/vp9_bitstream.h"
#if CONFIG_INTERNAL_STATS
#include "vp9/encoder/vp9_blockiness.h"
#endif
#include "vp9/encoder/vp9_context_tree.h"
#include "vp9/encoder/vp9_encodeframe.h"
#include "vp9/encoder/vp9_encodemb.h"
#include "vp9/encoder/vp9_encodemv.h"
#include "vp9/encoder/vp9_encoder.h"
#include "vp9/encoder/vp9_ethread.h"
#include "vp9/encoder/vp9_extend.h"
#include "vp9/encoder/vp9_firstpass.h"
#include "vp9/encoder/vp9_mbgraph.h"
#if CONFIG_NON_GREEDY_MV
#include "vp9/encoder/vp9_mcomp.h"
#endif
#include "vp9/encoder/vp9_multi_thread.h"
#include "vp9/encoder/vp9_noise_estimate.h"
#include "vp9/encoder/vp9_picklpf.h"
#include "vp9/encoder/vp9_ratectrl.h"
#include "vp9/encoder/vp9_rd.h"
#include "vp9/encoder/vp9_resize.h"
#include "vp9/encoder/vp9_segmentation.h"
#include "vp9/encoder/vp9_skin_detection.h"
#include "vp9/encoder/vp9_speed_features.h"
#include "vp9/encoder/vp9_svc_layercontext.h"
#include "vp9/encoder/vp9_temporal_filter.h"
#include "vp9/vp9_cx_iface.h"
#define AM_SEGMENT_ID_INACTIVE 7
#define AM_SEGMENT_ID_ACTIVE 0
// Whether to use high precision mv for altref computation.
#define ALTREF_HIGH_PRECISION_MV 1
// Q threshold for high precision mv. Choose a very high value for now so that
// HIGH_PRECISION is always chosen.
#define HIGH_PRECISION_MV_QTHRESH 200
#define FRAME_SIZE_FACTOR 128 // empirical params for context model threshold
#define FRAME_RATE_FACTOR 8
#ifdef OUTPUT_YUV_DENOISED
FILE *yuv_denoised_file = NULL;
#endif
#ifdef OUTPUT_YUV_SKINMAP
static FILE *yuv_skinmap_file = NULL;
#endif
#ifdef OUTPUT_YUV_REC
FILE *yuv_rec_file;
#endif
#ifdef OUTPUT_YUV_SVC_SRC
FILE *yuv_svc_src[3] = { NULL, NULL, NULL };
#endif
#if 0
FILE *framepsnr;
FILE *kf_list;
FILE *keyfile;
#endif
#ifdef ENABLE_KF_DENOISE
// Test condition for spatial denoise of source.
static int is_spatial_denoise_enabled(VP9_COMP *cpi) {
VP9_COMMON *const cm = &cpi->common;
const VP9EncoderConfig *const oxcf = &cpi->oxcf;
return (oxcf->pass != 1) && !is_lossless_requested(&cpi->oxcf) &&
frame_is_intra_only(cm);
}
#endif
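// Walsh-Hadamard forward transform helpers (defined later in this file).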
#if CONFIG_VP9_HIGHBITDEPTH
void highbd_wht_fwd_txfm(int16_t *src_diff, int bw, tran_low_t *coeff,
TX_SIZE tx_size);
#endif
void wht_fwd_txfm(int16_t *src_diff, int bw, tran_low_t *coeff,
TX_SIZE tx_size);
#if !CONFIG_REALTIME_ONLY
// Compute the adaptive threshold used to decide whether recoding can be
// skipped.
static int compute_context_model_thresh(const VP9_COMP *const cpi) {
const VP9_COMMON *const cm = &cpi->common;
const VP9EncoderConfig *const oxcf = &cpi->oxcf;
const int frame_size = (cm->width * cm->height) >> 10;
const int bitrate = (int)(oxcf->target_bandwidth >> 10);
const int qindex_factor = cm->base_qindex + (MAXQ >> 1);
// This equation makes the threshold adaptive to frame size.
  // The coding gain from recoding mainly comes from alternate frames with
  // large content change; we skip recoding when the difference between the
  // previous and current frame context probability models is below a certain
  // threshold.
  // The first component is the most critical part to guarantee adaptivity.
  // Other parameters are estimated based on normal settings for HD
  // resolution, e.g. frame_size = 1920x1080, bitrate = 8000,
  // qindex_factor < 50.
const int thresh =
((FRAME_SIZE_FACTOR * frame_size - FRAME_RATE_FACTOR * bitrate) *
qindex_factor) >>
9;
return thresh;
}
// Compute the total cost difference between the current and previous frame
// context probability models.
static int compute_context_model_diff(const VP9_COMMON *const cm) {
const FRAME_CONTEXT *const pre_fc =
&cm->frame_contexts[cm->frame_context_idx];
const FRAME_CONTEXT *const cur_fc = cm->fc;
const FRAME_COUNTS *counts = &cm->counts;
vpx_prob pre_last_prob, cur_last_prob;
int diff = 0;
int i, j, k, l, m, n;
// y_mode_prob
for (i = 0; i < BLOCK_SIZE_GROUPS; ++i) {
for (j = 0; j < INTRA_MODES - 1; ++j) {
diff += (int)counts->y_mode[i][j] *
(pre_fc->y_mode_prob[i][j] - cur_fc->y_mode_prob[i][j]);
}
pre_last_prob = MAX_PROB - pre_fc->y_mode_prob[i][INTRA_MODES - 2];
cur_last_prob = MAX_PROB - cur_fc->y_mode_prob[i][INTRA_MODES - 2];
diff += (int)counts->y_mode[i][INTRA_MODES - 1] *
(pre_last_prob - cur_last_prob);
}
// uv_mode_prob
for (i = 0; i < INTRA_MODES; ++i) {
for (j = 0; j < INTRA_MODES - 1; ++j) {
diff += (int)counts->uv_mode[i][j] *
(pre_fc->uv_mode_prob[i][j] - cur_fc->uv_mode_prob[i][j]);
}
pre_last_prob = MAX_PROB - pre_fc->uv_mode_prob[i][INTRA_MODES - 2];
cur_last_prob = MAX_PROB - cur_fc->uv_mode_prob[i][INTRA_MODES - 2];
diff += (int)counts->uv_mode[i][INTRA_MODES - 1] *
(pre_last_prob - cur_last_prob);
}
// partition_prob
for (i = 0; i < PARTITION_CONTEXTS; ++i) {
for (j = 0; j < PARTITION_TYPES - 1; ++j) {
diff += (int)counts->partition[i][j] *
(pre_fc->partition_prob[i][j] - cur_fc->partition_prob[i][j]);
}
pre_last_prob = MAX_PROB - pre_fc->partition_prob[i][PARTITION_TYPES - 2];
cur_last_prob = MAX_PROB - cur_fc->partition_prob[i][PARTITION_TYPES - 2];
diff += (int)counts->partition[i][PARTITION_TYPES - 1] *
(pre_last_prob - cur_last_prob);
}
// coef_probs
for (i = 0; i < TX_SIZES; ++i) {
for (j = 0; j < PLANE_TYPES; ++j) {
for (k = 0; k < REF_TYPES; ++k) {
for (l = 0; l < COEF_BANDS; ++l) {
for (m = 0; m < BAND_COEFF_CONTEXTS(l); ++m) {
for (n = 0; n < UNCONSTRAINED_NODES; ++n) {
diff += (int)counts->coef[i][j][k][l][m][n] *
(pre_fc->coef_probs[i][j][k][l][m][n] -
cur_fc->coef_probs[i][j][k][l][m][n]);
}
pre_last_prob =
MAX_PROB -
pre_fc->coef_probs[i][j][k][l][m][UNCONSTRAINED_NODES - 1];
cur_last_prob =
MAX_PROB -
cur_fc->coef_probs[i][j][k][l][m][UNCONSTRAINED_NODES - 1];
diff += (int)counts->coef[i][j][k][l][m][UNCONSTRAINED_NODES] *
(pre_last_prob - cur_last_prob);
}
}
}
}
}
// switchable_interp_prob
for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; ++i) {
for (j = 0; j < SWITCHABLE_FILTERS - 1; ++j) {
diff += (int)counts->switchable_interp[i][j] *
(pre_fc->switchable_interp_prob[i][j] -
cur_fc->switchable_interp_prob[i][j]);
}
pre_last_prob =
MAX_PROB - pre_fc->switchable_interp_prob[i][SWITCHABLE_FILTERS - 2];
cur_last_prob =
MAX_PROB - cur_fc->switchable_interp_prob[i][SWITCHABLE_FILTERS - 2];
diff += (int)counts->switchable_interp[i][SWITCHABLE_FILTERS - 1] *
(pre_last_prob - cur_last_prob);
}
// inter_mode_probs
for (i = 0; i < INTER_MODE_CONTEXTS; ++i) {
for (j = 0; j < INTER_MODES - 1; ++j) {
diff += (int)counts->inter_mode[i][j] *
(pre_fc->inter_mode_probs[i][j] - cur_fc->inter_mode_probs[i][j]);
}
pre_last_prob = MAX_PROB - pre_fc->inter_mode_probs[i][INTER_MODES - 2];
cur_last_prob = MAX_PROB - cur_fc->inter_mode_probs[i][INTER_MODES - 2];
diff += (int)counts->inter_mode[i][INTER_MODES - 1] *
(pre_last_prob - cur_last_prob);
}
// intra_inter_prob
for (i = 0; i < INTRA_INTER_CONTEXTS; ++i) {
diff += (int)counts->intra_inter[i][0] *
(pre_fc->intra_inter_prob[i] - cur_fc->intra_inter_prob[i]);
pre_last_prob = MAX_PROB - pre_fc->intra_inter_prob[i];
cur_last_prob = MAX_PROB - cur_fc->intra_inter_prob[i];
diff += (int)counts->intra_inter[i][1] * (pre_last_prob - cur_last_prob);
}
// comp_inter_prob
for (i = 0; i < COMP_INTER_CONTEXTS; ++i) {
diff += (int)counts->comp_inter[i][0] *
(pre_fc->comp_inter_prob[i] - cur_fc->comp_inter_prob[i]);
pre_last_prob = MAX_PROB - pre_fc->comp_inter_prob[i];
cur_last_prob = MAX_PROB - cur_fc->comp_inter_prob[i];
diff += (int)counts->comp_inter[i][1] * (pre_last_prob - cur_last_prob);
}
// single_ref_prob
for (i = 0; i < REF_CONTEXTS; ++i) {
for (j = 0; j < 2; ++j) {
diff += (int)counts->single_ref[i][j][0] *
(pre_fc->single_ref_prob[i][j] - cur_fc->single_ref_prob[i][j]);
pre_last_prob = MAX_PROB - pre_fc->single_ref_prob[i][j];
cur_last_prob = MAX_PROB - cur_fc->single_ref_prob[i][j];
diff +=
(int)counts->single_ref[i][j][1] * (pre_last_prob - cur_last_prob);
}
}
// comp_ref_prob
for (i = 0; i < REF_CONTEXTS; ++i) {
diff += (int)counts->comp_ref[i][0] *
(pre_fc->comp_ref_prob[i] - cur_fc->comp_ref_prob[i]);
pre_last_prob = MAX_PROB - pre_fc->comp_ref_prob[i];
cur_last_prob = MAX_PROB - cur_fc->comp_ref_prob[i];
diff += (int)counts->comp_ref[i][1] * (pre_last_prob - cur_last_prob);
}
// tx_probs
for (i = 0; i < TX_SIZE_CONTEXTS; ++i) {
// p32x32
for (j = 0; j < TX_SIZES - 1; ++j) {
diff += (int)counts->tx.p32x32[i][j] *
(pre_fc->tx_probs.p32x32[i][j] - cur_fc->tx_probs.p32x32[i][j]);
}
pre_last_prob = MAX_PROB - pre_fc->tx_probs.p32x32[i][TX_SIZES - 2];
cur_last_prob = MAX_PROB - cur_fc->tx_probs.p32x32[i][TX_SIZES - 2];
diff += (int)counts->tx.p32x32[i][TX_SIZES - 1] *
(pre_last_prob - cur_last_prob);
// p16x16
for (j = 0; j < TX_SIZES - 2; ++j) {
diff += (int)counts->tx.p16x16[i][j] *
(pre_fc->tx_probs.p16x16[i][j] - cur_fc->tx_probs.p16x16[i][j]);
}
pre_last_prob = MAX_PROB - pre_fc->tx_probs.p16x16[i][TX_SIZES - 3];
cur_last_prob = MAX_PROB - cur_fc->tx_probs.p16x16[i][TX_SIZES - 3];
diff += (int)counts->tx.p16x16[i][TX_SIZES - 2] *
(pre_last_prob - cur_last_prob);
// p8x8
for (j = 0; j < TX_SIZES - 3; ++j) {
diff += (int)counts->tx.p8x8[i][j] *
(pre_fc->tx_probs.p8x8[i][j] - cur_fc->tx_probs.p8x8[i][j]);
}
pre_last_prob = MAX_PROB - pre_fc->tx_probs.p8x8[i][TX_SIZES - 4];
cur_last_prob = MAX_PROB - cur_fc->tx_probs.p8x8[i][TX_SIZES - 4];
diff +=
(int)counts->tx.p8x8[i][TX_SIZES - 3] * (pre_last_prob - cur_last_prob);
}
// skip_probs
for (i = 0; i < SKIP_CONTEXTS; ++i) {
diff += (int)counts->skip[i][0] *
(pre_fc->skip_probs[i] - cur_fc->skip_probs[i]);
pre_last_prob = MAX_PROB - pre_fc->skip_probs[i];
cur_last_prob = MAX_PROB - cur_fc->skip_probs[i];
diff += (int)counts->skip[i][1] * (pre_last_prob - cur_last_prob);
}
// mv
for (i = 0; i < MV_JOINTS - 1; ++i) {
diff += (int)counts->mv.joints[i] *
(pre_fc->nmvc.joints[i] - cur_fc->nmvc.joints[i]);
}
pre_last_prob = MAX_PROB - pre_fc->nmvc.joints[MV_JOINTS - 2];
cur_last_prob = MAX_PROB - cur_fc->nmvc.joints[MV_JOINTS - 2];
diff +=
(int)counts->mv.joints[MV_JOINTS - 1] * (pre_last_prob - cur_last_prob);
for (i = 0; i < 2; ++i) {
const nmv_component_counts *nmv_count = &counts->mv.comps[i];
const nmv_component *pre_nmv_prob = &pre_fc->nmvc.comps[i];
const nmv_component *cur_nmv_prob = &cur_fc->nmvc.comps[i];
// sign
diff += (int)nmv_count->sign[0] * (pre_nmv_prob->sign - cur_nmv_prob->sign);
pre_last_prob = MAX_PROB - pre_nmv_prob->sign;
cur_last_prob = MAX_PROB - cur_nmv_prob->sign;
diff += (int)nmv_count->sign[1] * (pre_last_prob - cur_last_prob);
// classes
for (j = 0; j < MV_CLASSES - 1; ++j) {
diff += (int)nmv_count->classes[j] *
(pre_nmv_prob->classes[j] - cur_nmv_prob->classes[j]);
}
pre_last_prob = MAX_PROB - pre_nmv_prob->classes[MV_CLASSES - 2];
cur_last_prob = MAX_PROB - cur_nmv_prob->classes[MV_CLASSES - 2];
diff += (int)nmv_count->classes[MV_CLASSES - 1] *
(pre_last_prob - cur_last_prob);
// class0
for (j = 0; j < CLASS0_SIZE - 1; ++j) {
diff += (int)nmv_count->class0[j] *
(pre_nmv_prob->class0[j] - cur_nmv_prob->class0[j]);
}
pre_last_prob = MAX_PROB - pre_nmv_prob->class0[CLASS0_SIZE - 2];
cur_last_prob = MAX_PROB - cur_nmv_prob->class0[CLASS0_SIZE - 2];
diff += (int)nmv_count->class0[CLASS0_SIZE - 1] *
(pre_last_prob - cur_last_prob);
// bits
for (j = 0; j < MV_OFFSET_BITS; ++j) {
diff += (int)nmv_count->bits[j][0] *
(pre_nmv_prob->bits[j] - cur_nmv_prob->bits[j]);
pre_last_prob = MAX_PROB - pre_nmv_prob->bits[j];
cur_last_prob = MAX_PROB - cur_nmv_prob->bits[j];
diff += (int)nmv_count->bits[j][1] * (pre_last_prob - cur_last_prob);
}
// class0_fp
for (j = 0; j < CLASS0_SIZE; ++j) {
for (k = 0; k < MV_FP_SIZE - 1; ++k) {
diff += (int)nmv_count->class0_fp[j][k] *
(pre_nmv_prob->class0_fp[j][k] - cur_nmv_prob->class0_fp[j][k]);
}
pre_last_prob = MAX_PROB - pre_nmv_prob->class0_fp[j][MV_FP_SIZE - 2];
cur_last_prob = MAX_PROB - cur_nmv_prob->class0_fp[j][MV_FP_SIZE - 2];
diff += (int)nmv_count->class0_fp[j][MV_FP_SIZE - 1] *
(pre_last_prob - cur_last_prob);
}
// fp
for (j = 0; j < MV_FP_SIZE - 1; ++j) {
diff +=
(int)nmv_count->fp[j] * (pre_nmv_prob->fp[j] - cur_nmv_prob->fp[j]);
}
pre_last_prob = MAX_PROB - pre_nmv_prob->fp[MV_FP_SIZE - 2];
cur_last_prob = MAX_PROB - cur_nmv_prob->fp[MV_FP_SIZE - 2];
diff +=
(int)nmv_count->fp[MV_FP_SIZE - 1] * (pre_last_prob - cur_last_prob);
// class0_hp
diff += (int)nmv_count->class0_hp[0] *
(pre_nmv_prob->class0_hp - cur_nmv_prob->class0_hp);
pre_last_prob = MAX_PROB - pre_nmv_prob->class0_hp;
cur_last_prob = MAX_PROB - cur_nmv_prob->class0_hp;
diff += (int)nmv_count->class0_hp[1] * (pre_last_prob - cur_last_prob);
// hp
diff += (int)nmv_count->hp[0] * (pre_nmv_prob->hp - cur_nmv_prob->hp);
pre_last_prob = MAX_PROB - pre_nmv_prob->hp;
cur_last_prob = MAX_PROB - cur_nmv_prob->hp;
diff += (int)nmv_count->hp[1] * (pre_last_prob - cur_last_prob);
}
return -diff;
}
#endif // !CONFIG_REALTIME_ONLY
// Test for whether to calculate metrics for the frame.
static int is_psnr_calc_enabled(const VP9_COMP *cpi) {
const VP9_COMMON *const cm = &cpi->common;
const VP9EncoderConfig *const oxcf = &cpi->oxcf;
return cpi->b_calculate_psnr && (oxcf->pass != 1) && cm->show_frame;
}
/* clang-format off */
// Columns: level, max luma sample rate, max luma picture size, max luma
// picture breadth, average bitrate, max CPB size, min compression ratio,
// max column tiles, min altref distance, max reference frame buffers.
const Vp9LevelSpec vp9_level_defs[VP9_LEVELS] = {
  { LEVEL_1,        829440,    36864,   512,    200,    400, 2,  1,  4, 8 },
  { LEVEL_1_1,     2764800,    73728,   768,    800,   1000, 2,  1,  4, 8 },
  { LEVEL_2,       4608000,   122880,   960,   1800,   1500, 2,  1,  4, 8 },
  { LEVEL_2_1,     9216000,   245760,  1344,   3600,   2800, 2,  2,  4, 8 },
  { LEVEL_3,      20736000,   552960,  2048,   7200,   6000, 2,  4,  4, 8 },
  { LEVEL_3_1,    36864000,   983040,  2752,  12000,  10000, 2,  4,  4, 8 },
  { LEVEL_4,      83558400,  2228224,  4160,  18000,  16000, 4,  4,  4, 8 },
  { LEVEL_4_1,   160432128,  2228224,  4160,  30000,  18000, 4,  4,  5, 6 },
  { LEVEL_5,     311951360,  8912896,  8384,  60000,  36000, 6,  8,  6, 4 },
  { LEVEL_5_1,   588251136,  8912896,  8384, 120000,  46000, 8,  8, 10, 4 },
  // TODO(huisu): update max_cpb_size for levels 5_2 ~ 6_2 when they are
  // finalized (currently tentative).
  { LEVEL_5_2,  1176502272,  8912896,  8384, 180000,  90000, 8,  8, 10, 4 },
  { LEVEL_6,    1176502272, 35651584, 16832, 180000,  90000, 8, 16, 10, 4 },
  { LEVEL_6_1, 2353004544u, 35651584, 16832, 240000, 180000, 8, 16, 10, 4 },
  { LEVEL_6_2, 4706009088u, 35651584, 16832, 480000, 360000, 8, 16, 10, 4 },
};
/* clang-format on */
static const char *level_fail_messages[TARGET_LEVEL_FAIL_IDS] = {
"The average bit-rate is too high.",
"The picture size is too large.",
"The picture width/height is too large.",
"The luma sample rate is too large.",
"The CPB size is too large.",
"The compression ratio is too small",
"Too many column tiles are used.",
"The alt-ref distance is too small.",
"Too many reference buffers are used."
};
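// Convert a VPX_SCALING mode into its scaling ratio (*hr / *hs).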
static INLINE void Scale2Ratio(VPX_SCALING mode, int *hr, int *hs) {
switch (mode) {
case NORMAL:
*hr = 1;
*hs = 1;
break;
case FOURFIVE:
*hr = 4;
*hs = 5;
break;
case THREEFIVE:
*hr = 3;
*hs = 5;
break;
default:
assert(mode == ONETWO);
*hr = 1;
*hs = 2;
break;
}
}
// Mark all inactive blocks as active. Other segmentation features may be set
// so memset cannot be used, instead only inactive blocks should be reset.
static void suppress_active_map(VP9_COMP *cpi) {
unsigned char *const seg_map = cpi->segmentation_map;
if (cpi->active_map.enabled || cpi->active_map.update) {
const int rows = cpi->common.mi_rows;
const int cols = cpi->common.mi_cols;
int i;
for (i = 0; i < rows * cols; ++i)
if (seg_map[i] == AM_SEGMENT_ID_INACTIVE)
seg_map[i] = AM_SEGMENT_ID_ACTIVE;
}
}
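// Apply the application-supplied active map to the segmentation map and, if
// any blocks are inactive, enable the skip and loop-filter-off features for
// the inactive segment.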
static void apply_active_map(VP9_COMP *cpi) {
struct segmentation *const seg = &cpi->common.seg;
unsigned char *const seg_map = cpi->segmentation_map;
const unsigned char *const active_map = cpi->active_map.map;
int i;
assert(AM_SEGMENT_ID_ACTIVE == CR_SEGMENT_ID_BASE);
if (frame_is_intra_only(&cpi->common)) {
cpi->active_map.enabled = 0;
cpi->active_map.update = 1;
}
if (cpi->active_map.update) {
if (cpi->active_map.enabled) {
for (i = 0; i < cpi->common.mi_rows * cpi->common.mi_cols; ++i)
if (seg_map[i] == AM_SEGMENT_ID_ACTIVE) seg_map[i] = active_map[i];
vp9_enable_segmentation(seg);
vp9_enable_segfeature(seg, AM_SEGMENT_ID_INACTIVE, SEG_LVL_SKIP);
vp9_enable_segfeature(seg, AM_SEGMENT_ID_INACTIVE, SEG_LVL_ALT_LF);
// Setting the data to -MAX_LOOP_FILTER will result in the computed loop
// filter level being zero regardless of the value of seg->abs_delta.
vp9_set_segdata(seg, AM_SEGMENT_ID_INACTIVE, SEG_LVL_ALT_LF,
-MAX_LOOP_FILTER);
} else {
vp9_disable_segfeature(seg, AM_SEGMENT_ID_INACTIVE, SEG_LVL_SKIP);
vp9_disable_segfeature(seg, AM_SEGMENT_ID_INACTIVE, SEG_LVL_ALT_LF);
if (seg->enabled) {
seg->update_data = 1;
seg->update_map = 1;
}
}
cpi->active_map.update = 0;
}
}
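// Translate the ROI map and its per-segment q/loop-filter/skip/reference
// deltas into segmentation features. Only enabled in realtime mode at
// speed >= 5.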
static void apply_roi_map(VP9_COMP *cpi) {
VP9_COMMON *cm = &cpi->common;
struct segmentation *const seg = &cm->seg;
vpx_roi_map_t *roi = &cpi->roi;
const int *delta_q = roi->delta_q;
const int *delta_lf = roi->delta_lf;
const int *skip = roi->skip;
int ref_frame[8];
int internal_delta_q[MAX_SEGMENTS];
int i;
static const int flag_list[4] = { 0, VP9_LAST_FLAG, VP9_GOLD_FLAG,
VP9_ALT_FLAG };
  // TODO(jianj): Investigate why ROI is not working for speed < 5 or in
  // non-realtime mode.
if (cpi->oxcf.mode != REALTIME || cpi->oxcf.speed < 5) return;
if (!roi->enabled) return;
memcpy(&ref_frame, roi->ref_frame, sizeof(ref_frame));
vp9_enable_segmentation(seg);
vp9_clearall_segfeatures(seg);
  // Select delta coding method.
seg->abs_delta = SEGMENT_DELTADATA;
memcpy(cpi->segmentation_map, roi->roi_map, (cm->mi_rows * cm->mi_cols));
for (i = 0; i < MAX_SEGMENTS; ++i) {
// Translate the external delta q values to internal values.
internal_delta_q[i] = vp9_quantizer_to_qindex(abs(delta_q[i]));
if (delta_q[i] < 0) internal_delta_q[i] = -internal_delta_q[i];
vp9_disable_segfeature(seg, i, SEG_LVL_ALT_Q);
vp9_disable_segfeature(seg, i, SEG_LVL_ALT_LF);
if (internal_delta_q[i] != 0) {
vp9_enable_segfeature(seg, i, SEG_LVL_ALT_Q);
vp9_set_segdata(seg, i, SEG_LVL_ALT_Q, internal_delta_q[i]);
}
if (delta_lf[i] != 0) {
vp9_enable_segfeature(seg, i, SEG_LVL_ALT_LF);
vp9_set_segdata(seg, i, SEG_LVL_ALT_LF, delta_lf[i]);
}
if (skip[i] != 0) {
vp9_enable_segfeature(seg, i, SEG_LVL_SKIP);
vp9_set_segdata(seg, i, SEG_LVL_SKIP, skip[i]);
}
if (ref_frame[i] >= 0) {
int valid_ref = 1;
// ALTREF is not used as reference for nonrd_pickmode with 0 lag.
if (ref_frame[i] == ALTREF_FRAME && cpi->sf.use_nonrd_pick_mode)
valid_ref = 0;
// If GOLDEN is selected, make sure it's set as reference.
if (ref_frame[i] == GOLDEN_FRAME &&
!(cpi->ref_frame_flags & flag_list[ref_frame[i]])) {
valid_ref = 0;
}
      // GOLDEN was updated in the previous encoded frame, so GOLDEN and LAST
      // are the same reference.
if (ref_frame[i] == GOLDEN_FRAME && cpi->rc.frames_since_golden == 0)
ref_frame[i] = LAST_FRAME;
if (valid_ref) {
vp9_enable_segfeature(seg, i, SEG_LVL_REF_FRAME);
vp9_set_segdata(seg, i, SEG_LVL_REF_FRAME, ref_frame[i]);
}
}
}
roi->enabled = 1;
}
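// Reset the level stats and spec; the level starts out unknown and the
// minimum altref distance starts at INT_MAX so it can only decrease.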
static void init_level_info(Vp9LevelInfo *level_info) {
Vp9LevelStats *const level_stats = &level_info->level_stats;
Vp9LevelSpec *const level_spec = &level_info->level_spec;
memset(level_stats, 0, sizeof(*level_stats));
memset(level_spec, 0, sizeof(*level_spec));
level_spec->level = LEVEL_UNKNOWN;
level_spec->min_altref_distance = INT_MAX;
}
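// Return 1 if all eight per-segment values lie within +/-range.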
static int check_seg_range(int seg_data[8], int range) {
return !(abs(seg_data[0]) > range || abs(seg_data[1]) > range ||
abs(seg_data[2]) > range || abs(seg_data[3]) > range ||
abs(seg_data[4]) > range || abs(seg_data[5]) > range ||
abs(seg_data[6]) > range || abs(seg_data[7]) > range);
}
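// Return the lowest level whose limits accommodate level_spec, or
// LEVEL_UNKNOWN if none does.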
VP9_LEVEL vp9_get_level(const Vp9LevelSpec *const level_spec) {
int i;
const Vp9LevelSpec *this_level;
vpx_clear_system_state();
for (i = 0; i < VP9_LEVELS; ++i) {
this_level = &vp9_level_defs[i];
if ((double)level_spec->max_luma_sample_rate >
(double)this_level->max_luma_sample_rate *
(1 + SAMPLE_RATE_GRACE_P) ||
level_spec->max_luma_picture_size > this_level->max_luma_picture_size ||
level_spec->max_luma_picture_breadth >
this_level->max_luma_picture_breadth ||
level_spec->average_bitrate > this_level->average_bitrate ||
level_spec->max_cpb_size > this_level->max_cpb_size ||
level_spec->compression_ratio < this_level->compression_ratio ||
level_spec->max_col_tiles > this_level->max_col_tiles ||
level_spec->min_altref_distance < this_level->min_altref_distance ||
level_spec->max_ref_frame_buffers > this_level->max_ref_frame_buffers)
continue;
break;
}
return (i == VP9_LEVELS) ? LEVEL_UNKNOWN : vp9_level_defs[i].level;
}
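// Install a new ROI map and its per-segment deltas. Returns 0 on success or
// -1 if the map dimensions do not match or a delta is out of range.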
int vp9_set_roi_map(VP9_COMP *cpi, unsigned char *map, unsigned int rows,
unsigned int cols, int delta_q[8], int delta_lf[8],
int skip[8], int ref_frame[8]) {
VP9_COMMON *cm = &cpi->common;
vpx_roi_map_t *roi = &cpi->roi;
const int range = 63;
  const int ref_frame_range = 3;  // ALTREF_FRAME is the largest valid value.
const int skip_range = 1;
const int frame_rows = cpi->common.mi_rows;
const int frame_cols = cpi->common.mi_cols;
  // Check that the number of rows and columns match.
if (frame_rows != (int)rows || frame_cols != (int)cols) {
return -1;
}
if (!check_seg_range(delta_q, range) || !check_seg_range(delta_lf, range) ||
!check_seg_range(ref_frame, ref_frame_range) ||
!check_seg_range(skip, skip_range))
return -1;
  // Disable segmentation if no map is given, or if neither deltas nor
  // reference frames are specified for any segment.
if (!map ||
(!(delta_q[0] | delta_q[1] | delta_q[2] | delta_q[3] | delta_q[4] |
delta_q[5] | delta_q[6] | delta_q[7] | delta_lf[0] | delta_lf[1] |
delta_lf[2] | delta_lf[3] | delta_lf[4] | delta_lf[5] | delta_lf[6] |
delta_lf[7] | skip[0] | skip[1] | skip[2] | skip[3] | skip[4] |
skip[5] | skip[6] | skip[7]) &&
(ref_frame[0] == -1 && ref_frame[1] == -1 && ref_frame[2] == -1 &&
ref_frame[3] == -1 && ref_frame[4] == -1 && ref_frame[5] == -1 &&
ref_frame[6] == -1 && ref_frame[7] == -1))) {
vp9_disable_segmentation(&cm->seg);
cpi->roi.enabled = 0;
return 0;
}
if (roi->roi_map) {
vpx_free(roi->roi_map);
roi->roi_map = NULL;
}
CHECK_MEM_ERROR(cm, roi->roi_map, vpx_malloc(rows * cols));
// Copy to ROI structure in the compressor.
memcpy(roi->roi_map, map, rows * cols);
memcpy(&roi->delta_q, delta_q, MAX_SEGMENTS * sizeof(delta_q[0]));
memcpy(&roi->delta_lf, delta_lf, MAX_SEGMENTS * sizeof(delta_lf[0]));
memcpy(&roi->skip, skip, MAX_SEGMENTS * sizeof(skip[0]));
memcpy(&roi->ref_frame, ref_frame, MAX_SEGMENTS * sizeof(ref_frame[0]));
roi->enabled = 1;
roi->rows = rows;
roi->cols = cols;
return 0;
}
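// Import a 16x16-unit active map, expanding it to the 8x8 mi grid used by
// the segmentation map. Returns 0 on success or -1 on a size mismatch.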
int vp9_set_active_map(VP9_COMP *cpi, unsigned char *new_map_16x16, int rows,
int cols) {
if (rows == cpi->common.mb_rows && cols == cpi->common.mb_cols) {
unsigned char *const active_map_8x8 = cpi->active_map.map;
const int mi_rows = cpi->common.mi_rows;
const int mi_cols = cpi->common.mi_cols;
cpi->active_map.update = 1;
if (new_map_16x16) {
int r, c;
for (r = 0; r < mi_rows; ++r) {
for (c = 0; c < mi_cols; ++c) {
active_map_8x8[r * mi_cols + c] =
new_map_16x16[(r >> 1) * cols + (c >> 1)]
? AM_SEGMENT_ID_ACTIVE
: AM_SEGMENT_ID_INACTIVE;
}
}
cpi->active_map.enabled = 1;
} else {
cpi->active_map.enabled = 0;
}
return 0;
} else {
return -1;
}
}
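// Export the current active map, collapsing the 8x8 mi grid back to 16x16
// units. Returns 0 on success or -1 on a size mismatch.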
int vp9_get_active_map(VP9_COMP *cpi, unsigned char *new_map_16x16, int rows,
int cols) {
if (rows == cpi->common.mb_rows && cols == cpi->common.mb_cols &&
new_map_16x16) {
unsigned char *const seg_map_8x8 = cpi->segmentation_map;
const int mi_rows = cpi->common.mi_rows;
const int mi_cols = cpi->common.mi_cols;
memset(new_map_16x16, !cpi->active_map.enabled, rows * cols);
if (cpi->active_map.enabled) {
int r, c;
for (r = 0; r < mi_rows; ++r) {
for (c = 0; c < mi_cols; ++c) {
// Cyclic refresh segments are considered active despite not having
// AM_SEGMENT_ID_ACTIVE
new_map_16x16[(r >> 1) * cols + (c >> 1)] |=
seg_map_8x8[r * mi_cols + c] != AM_SEGMENT_ID_INACTIVE;
}
}
}
return 0;
} else {
return -1;
}
}
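// Select between the regular and high precision (eighth-pel) MV cost tables.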
void vp9_set_high_precision_mv(VP9_COMP *cpi, int allow_high_precision_mv) {
MACROBLOCK *const mb = &cpi->td.mb;
cpi->common.allow_high_precision_mv = allow_high_precision_mv;
if (cpi->common.allow_high_precision_mv) {
mb->mvcost = mb->nmvcost_hp;
mb->mvsadcost = mb->nmvsadcost_hp;
} else {
mb->mvcost = mb->nmvcost;
mb->mvsadcost = mb->nmvsadcost;
}
}
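// Per-frame setup of the entropy context, frame context index and reference
// refresh flags.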
static void setup_frame(VP9_COMP *cpi) {
VP9_COMMON *const cm = &cpi->common;
// Set up entropy context depending on frame type. The decoder mandates
// the use of the default context, index 0, for keyframes and inter
// frames where the error_resilient_mode or intra_only flag is set. For
// other inter-frames the encoder currently uses only two contexts;
// context 1 for ALTREF frames and context 0 for the others.
if (frame_is_intra_only(cm) || cm->error_resilient_mode) {
vp9_setup_past_independence(cm);
} else {
if (!cpi->use_svc) cm->frame_context_idx = cpi->refresh_alt_ref_frame;
}
  // TODO(jingning): Overwrite the frame_context_idx index in the multi-layer
  // ARF case. Further investigation is needed on whether this could be
  // applied to the single-layer ARF case as well.
if (cpi->multi_layer_arf && !cpi->use_svc) {
GF_GROUP *const gf_group = &cpi->twopass.gf_group;
const int gf_group_index = gf_group->index;
const int boost_frame =
!cpi->rc.is_src_frame_alt_ref &&
(cpi->refresh_golden_frame || cpi->refresh_alt_ref_frame);
// frame_context_idx Frame Type
// 0 Intra only frame, base layer ARF
// 1 ARFs with layer depth = 2,3
// 2 ARFs with layer depth > 3
// 3 Non-boosted frames
if (frame_is_intra_only(cm)) {
cm->frame_context_idx = 0;
} else if (boost_frame) {
if (gf_group->rf_level[gf_group_index] == GF_ARF_STD)
cm->frame_context_idx = 0;
else if (gf_group->layer_depth[gf_group_index] <= 3)
cm->frame_context_idx = 1;
else
cm->frame_context_idx = 2;
} else {
cm->frame_context_idx = 3;
}
}
if (cm->frame_type == KEY_FRAME) {
cpi->refresh_golden_frame = 1;
cpi->refresh_alt_ref_frame = 1;
vp9_zero(cpi->interp_filter_selected);
} else {
*cm->fc = cm->frame_contexts[cm->frame_context_idx];
vp9_zero(cpi->interp_filter_selected[0]);
}
}
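// Zero the mode info arrays and point the visible-frame pointers just inside
// the top/left border.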
static void vp9_enc_setup_mi(VP9_COMMON *cm) {
int i;
cm->mi = cm->mip + cm->mi_stride + 1;
memset(cm->mip, 0, cm->mi_stride * (cm->mi_rows + 1) * sizeof(*cm->mip));
cm->prev_mi = cm->prev_mip + cm->mi_stride + 1;
// Clear top border row
memset(cm->prev_mip, 0, sizeof(*cm->prev_mip) * cm->mi_stride);
// Clear left border column
for (i = 1; i < cm->mi_rows + 1; ++i)
memset(&cm->prev_mip[i * cm->mi_stride], 0, sizeof(*cm->prev_mip));
cm->mi_grid_visible = cm->mi_grid_base + cm->mi_stride + 1;
cm->prev_mi_grid_visible = cm->prev_mi_grid_base + cm->mi_stride + 1;
memset(cm->mi_grid_base, 0,
cm->mi_stride * (cm->mi_rows + 1) * sizeof(*cm->mi_grid_base));
}
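// Allocate the mode info arrays and grids. Returns 1 on allocation failure.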
static int vp9_enc_alloc_mi(VP9_COMMON *cm, int mi_size) {
cm->mip = vpx_calloc(mi_size, sizeof(*cm->mip));
if (!cm->mip) return 1;
cm->prev_mip = vpx_calloc(mi_size, sizeof(*cm->prev_mip));
if (!cm->prev_mip) return 1;
cm->mi_alloc_size = mi_size;
cm->mi_grid_base = (MODE_INFO **)vpx_calloc(mi_size, sizeof(MODE_INFO *));
if (!cm->mi_grid_base) return 1;
cm->prev_mi_grid_base =
(MODE_INFO **)vpx_calloc(mi_size, sizeof(MODE_INFO *));
if (!cm->prev_mi_grid_base) return 1;
return 0;
}
static void vp9_enc_free_mi(VP9_COMMON *cm) {
vpx_free(cm->mip);
cm->mip = NULL;
vpx_free(cm->prev_mip);
cm->prev_mip = NULL;
vpx_free(cm->mi_grid_base);
cm->mi_grid_base = NULL;
vpx_free(cm->prev_mi_grid_base);
cm->prev_mi_grid_base = NULL;
cm->mi_alloc_size = 0;
}
static void vp9_swap_mi_and_prev_mi(VP9_COMMON *cm) {
// Current mip will be the prev_mip for the next frame.
MODE_INFO **temp_base = cm->prev_mi_grid_base;
MODE_INFO *temp = cm->prev_mip;
  // Skip updating prev_mi in show_existing_frame mode.
if (cm->show_existing_frame) return;
cm->prev_mip = cm->mip;
cm->mip = temp;
// Update the upper left visible macroblock ptrs.
cm->mi = cm->mip + cm->mi_stride + 1;
cm->prev_mi = cm->prev_mip + cm->mi_stride + 1;
cm->prev_mi_grid_base = cm->mi_grid_base;
cm->mi_grid_base = temp_base;
cm->mi_grid_visible = cm->mi_grid_base + cm->mi_stride + 1;
cm->prev_mi_grid_visible = cm->prev_mi_grid_base + cm->mi_stride + 1;
}
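// One-time global initialization: RTCD dispatch tables, intra predictors,
// motion estimation and rate control LUTs, and MV entropy tables.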
void vp9_initialize_enc(void) {
static volatile int init_done = 0;
if (!init_done) {
vp9_rtcd();
vpx_dsp_rtcd();
vpx_scale_rtcd();
vp9_init_intra_predictors();
vp9_init_me_luts();
vp9_rc_init_minq_luts();
vp9_entropy_mv_init();
#if !CONFIG_REALTIME_ONLY
vp9_temporal_filter_init();
#endif
init_done = 1;
}
}
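// Free all buffers owned by the compressor, including SVC layer contexts and
// scaled frame buffers.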
static void dealloc_compressor_data(VP9_COMP *cpi) {
VP9_COMMON *const cm = &cpi->common;
int i;
vpx_free(cpi->mbmi_ext_base);
cpi->mbmi_ext_base = NULL;
vpx_free(cpi->tile_data);
cpi->tile_data = NULL;
vpx_free(cpi->segmentation_map);
cpi->segmentation_map = NULL;
vpx_free(cpi->coding_context.last_frame_seg_map_copy);
cpi->coding_context.last_frame_seg_map_copy = NULL;
vpx_free(cpi->nmvcosts[0]);
vpx_free(cpi->nmvcosts[1]);
cpi->nmvcosts[0] = NULL;
cpi->nmvcosts[1] = NULL;
vpx_free(cpi->nmvcosts_hp[0]);
vpx_free(cpi->nmvcosts_hp[1]);
cpi->nmvcosts_hp[0] = NULL;
cpi->nmvcosts_hp[1] = NULL;
vpx_free(cpi->nmvsadcosts[0]);
vpx_free(cpi->nmvsadcosts[1]);
cpi->nmvsadcosts[0] = NULL;
cpi->nmvsadcosts[1] = NULL;
vpx_free(cpi->nmvsadcosts_hp[0]);
vpx_free(cpi->nmvsadcosts_hp[1]);
cpi->nmvsadcosts_hp[0] = NULL;
cpi->nmvsadcosts_hp[1] = NULL;
vpx_free(cpi->skin_map);
cpi->skin_map = NULL;
vpx_free(cpi->prev_partition);
cpi->prev_partition = NULL;
vpx_free(cpi->svc.prev_partition_svc);
cpi->svc.prev_partition_svc = NULL;
vpx_free(cpi->prev_segment_id);
cpi->prev_segment_id = NULL;
vpx_free(cpi->prev_variance_low);
cpi->prev_variance_low = NULL;
vpx_free(cpi->copied_frame_cnt);
cpi->copied_frame_cnt = NULL;
vpx_free(cpi->content_state_sb_fd);
cpi->content_state_sb_fd = NULL;
vpx_free(cpi->count_arf_frame_usage);
cpi->count_arf_frame_usage = NULL;
vpx_free(cpi->count_lastgolden_frame_usage);
cpi->count_lastgolden_frame_usage = NULL;
vp9_cyclic_refresh_free(cpi->cyclic_refresh);
cpi->cyclic_refresh = NULL;
vpx_free(cpi->active_map.map);
cpi->active_map.map = NULL;
vpx_free(cpi->roi.roi_map);
cpi->roi.roi_map = NULL;
vpx_free(cpi->consec_zero_mv);
cpi->consec_zero_mv = NULL;
vpx_free(cpi->mb_wiener_variance);
cpi->mb_wiener_variance = NULL;
vpx_free(cpi->mi_ssim_rdmult_scaling_factors);
cpi->mi_ssim_rdmult_scaling_factors = NULL;
#if CONFIG_RATE_CTRL
free_partition_info(cpi);
free_motion_vector_info(cpi);
free_fp_motion_vector_info(cpi);
#endif
vp9_free_ref_frame_buffers(cm->buffer_pool);
#if CONFIG_VP9_POSTPROC
vp9_free_postproc_buffers(cm);
#endif
vp9_free_context_buffers(cm);
vpx_free_frame_buffer(&cpi->last_frame_uf);
vpx_free_frame_buffer(&cpi->scaled_source);
vpx_free_frame_buffer(&cpi->scaled_last_source);
vpx_free_frame_buffer(&cpi->alt_ref_buffer);
#ifdef ENABLE_KF_DENOISE
vpx_free_frame_buffer(&cpi->raw_unscaled_source);
vpx_free_frame_buffer(&cpi->raw_scaled_source);
#endif
vp9_lookahead_destroy(cpi->lookahead);
vpx_free(cpi->tile_tok[0][0]);
cpi->tile_tok[0][0] = 0;
vpx_free(cpi->tplist[0][0]);
cpi->tplist[0][0] = NULL;
vp9_free_pc_tree(&cpi->td);
for (i = 0; i < cpi->svc.number_spatial_layers; ++i) {
LAYER_CONTEXT *const lc = &cpi->svc.layer_context[i];
vpx_free(lc->rc_twopass_stats_in.buf);
lc->rc_twopass_stats_in.buf = NULL;
lc->rc_twopass_stats_in.sz = 0;
}
if (cpi->source_diff_var != NULL) {
vpx_free(cpi->source_diff_var);
cpi->source_diff_var = NULL;
}
for (i = 0; i < MAX_LAG_BUFFERS; ++i) {
vpx_free_frame_buffer(&cpi->svc.scaled_frames[i]);
}
memset(&cpi->svc.scaled_frames[0], 0,
MAX_LAG_BUFFERS * sizeof(cpi->svc.scaled_frames[0]));
vpx_free_frame_buffer(&cpi->svc.scaled_temp);
memset(&cpi->svc.scaled_temp, 0, sizeof(cpi->svc.scaled_temp));
vpx_free_frame_buffer(&cpi->svc.empty_frame.img);
memset(&cpi->svc.empty_frame, 0, sizeof(cpi->svc.empty_frame));
vp9_free_svc_cyclic_refresh(cpi);
}
static void save_coding_context(VP9_COMP *cpi) {
CODING_CONTEXT *const cc = &cpi->coding_context;
VP9_COMMON *cm = &cpi->common;
  // Stores a snapshot of key state variables which can subsequently be
  // restored with a call to restore_coding_context. These functions are
  // intended for use in the recode loop, where the quantizer value is
  // adjusted between loop iterations.
vp9_copy(cc->nmvjointcost, cpi->td.mb.nmvjointcost);
memcpy(cc->nmvcosts[0], cpi->nmvcosts[0],
MV_VALS * sizeof(*cpi->nmvcosts[0]));
memcpy(cc->nmvcosts[1], cpi->nmvcosts[1],
MV_VALS * sizeof(*cpi->nmvcosts[1]));
memcpy(cc->nmvcosts_hp[0], cpi->nmvcosts_hp[0],
MV_VALS * sizeof(*cpi->nmvcosts_hp[0]));
memcpy(cc->nmvcosts_hp[1], cpi->nmvcosts_hp[1],
MV_VALS * sizeof(*cpi->nmvcosts_hp[1]));
vp9_copy(cc->segment_pred_probs, cm->seg.pred_probs);
memcpy(cpi->coding_context.last_frame_seg_map_copy, cm->last_frame_seg_map,
(cm->mi_rows * cm->mi_cols));
vp9_copy(cc->last_ref_lf_deltas, cm->lf.last_ref_deltas);
vp9_copy(cc->last_mode_lf_deltas, cm->lf.last_mode_deltas);
cc->fc = *cm->fc;
}
static void restore_coding_context(VP9_COMP *cpi) {
CODING_CONTEXT *const cc = &cpi->coding_context;
VP9_COMMON *cm = &cpi->common;
  // Restore key state variables to the snapshot state stored in the
  // previous call to save_coding_context.
vp9_copy(cpi->td.mb.nmvjointcost, cc->nmvjointcost);
memcpy(cpi->nmvcosts[0], cc->nmvcosts[0], MV_VALS * sizeof(*cc->nmvcosts[0]));
memcpy(cpi->nmvcosts[1], cc->nmvcosts[1], MV_VALS * sizeof(*cc->nmvcosts[1]));
memcpy(cpi->nmvcosts_hp[0], cc->nmvcosts_hp[0],
MV_VALS * sizeof(*cc->nmvcosts_hp[0]));
memcpy(cpi->nmvcosts_hp[1], cc->nmvcosts_hp[1],
MV_VALS * sizeof(*cc->nmvcosts_hp[1]));
vp9_copy(cm->seg.pred_probs, cc->segment_pred_probs);
memcpy(cm->last_frame_seg_map, cpi->coding_context.last_frame_seg_map_copy,
(cm->mi_rows * cm->mi_cols));
vp9_copy(cm->lf.last_ref_deltas, cc->last_ref_lf_deltas);
vp9_copy(cm->lf.last_mode_deltas, cc->last_mode_lf_deltas);
*cm->fc = cc->fc;
}
#if !CONFIG_REALTIME_ONLY
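// Configure static segmentation features (Q/LF deltas, reference frame,
// skip) based on frame type and position in the golden/altref group.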
static void configure_static_seg_features(VP9_COMP *cpi) {
VP9_COMMON *const cm = &cpi->common;
const RATE_CONTROL *const rc = &cpi->rc;
struct segmentation *const seg = &cm->seg;
int high_q = (int)(rc->avg_q > 48.0);
int qi_delta;
// Disable and clear down for KF
if (cm->frame_type == KEY_FRAME) {
// Clear down the global segmentation map
memset(cpi->segmentation_map, 0, cm->mi_rows * cm->mi_cols);
seg->update_map = 0;
seg->update_data = 0;
cpi->static_mb_pct = 0;
// Disable segmentation
vp9_disable_segmentation(seg);
// Clear down the segment features.
vp9_clearall_segfeatures(seg);
} else if (cpi->refresh_alt_ref_frame) {
// If this is an alt ref frame
// Clear down the global segmentation map
memset(cpi->segmentation_map, 0, cm->mi_rows * cm->mi_cols);
seg->update_map = 0;
seg->update_data = 0;
cpi->static_mb_pct = 0;
// Disable segmentation and individual segment features by default
vp9_disable_segmentation(seg);
vp9_clearall_segfeatures(seg);
// Scan frames from current to arf frame.
// This function re-enables segmentation if appropriate.
vp9_update_mbgraph_stats(cpi);
// If segmentation was enabled set those features needed for the
// arf itself.
if (seg->enabled) {
seg->update_map = 1;
seg->update_data = 1;
qi_delta =
vp9_compute_qdelta(rc, rc->avg_q, rc->avg_q * 0.875, cm->bit_depth);
vp9_set_segdata(seg, 1, SEG_LVL_ALT_Q, qi_delta - 2);
vp9_set_segdata(seg, 1, SEG_LVL_ALT_LF, -2);
vp9_enable_segfeature(seg, 1, SEG_LVL_ALT_Q);
vp9_enable_segfeature(seg, 1, SEG_LVL_ALT_LF);
// Where relevant assume segment data is delta data
seg->abs_delta = SEGMENT_DELTADATA;
}
} else if (seg->enabled) {
// All other frames if segmentation has been enabled
// First normal frame in a valid gf or alt ref group
if (rc->frames_since_golden == 0) {
// Set up segment features for normal frames in an arf group
if (rc->source_alt_ref_active) {
seg->update_map = 0;
seg->update_data = 1;
seg->abs_delta = SEGMENT_DELTADATA;
qi_delta =
vp9_compute_qdelta(rc, rc->avg_q, rc->avg_q * 1.125, cm->bit_depth);
vp9_set_segdata(seg, 1, SEG_LVL_ALT_Q, qi_delta + 2);
vp9_enable_segfeature(seg, 1, SEG_LVL_ALT_Q);
vp9_set_segdata(seg, 1, SEG_LVL_ALT_LF, -2);
vp9_enable_segfeature(seg, 1, SEG_LVL_ALT_LF);
// Segment coding disabled for compred testing
if (high_q || (cpi->static_mb_pct == 100)) {
vp9_set_segdata(seg, 1, SEG_LVL_REF_FRAME, ALTREF_FRAME);
vp9_enable_segfeature(seg, 1, SEG_LVL_REF_FRAME);
vp9_enable_segfeature(seg, 1, SEG_LVL_SKIP);
}
} else {
// Disable segmentation and clear down features if alt ref
// is not active for this group
vp9_disable_segmentation(seg);
memset(cpi->segmentation_map, 0, cm->mi_rows * cm->mi_cols);
seg->update_map = 0;
seg->update_data = 0;
vp9_clearall_segfeatures(seg);
}
} else if (rc->is_src_frame_alt_ref) {
// Special case where we are coding over the top of a previous
// alt ref frame.
// Segment coding disabled for compred testing
// Enable ref frame features for segment 0 as well
vp9_enable_segfeature(seg, 0, SEG_LVL_REF_FRAME);
vp9_enable_segfeature(seg, 1, SEG_LVL_REF_FRAME);
// All mbs should use ALTREF_FRAME
vp9_clear_segdata(seg, 0, SEG_LVL_REF_FRAME);
vp9_set_segdata(seg, 0, SEG_LVL_REF_FRAME, ALTREF_FRAME);
vp9_clear_segdata(seg, 1, SEG_LVL_REF_FRAME);
vp9_set_segdata(seg, 1, SEG_LVL_REF_FRAME, ALTREF_FRAME);
// Skip all MBs if high Q (0,0 mv and skip coeffs)
if (high_q) {
vp9_enable_segfeature(seg, 0, SEG_LVL_SKIP);
vp9_enable_segfeature(seg, 1, SEG_LVL_SKIP);
}
// Enable data update
seg->update_data = 1;
} else {
// All other frames.
      // No updates; leave things as they are.
seg->update_map = 0;
seg->update_data = 0;
}
}
}
#endif // !CONFIG_REALTIME_ONLY
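// Copy the segment ids of the just-coded frame into last_frame_seg_map for
// use as a prediction context on the next frame.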
static void update_reference_segmentation_map(VP9_COMP *cpi) {
VP9_COMMON *const cm = &cpi->common;
MODE_INFO **mi_8x8_ptr = cm->mi_grid_visible;
uint8_t *cache_ptr = cm->last_frame_seg_map;
int row, col;
for (row = 0; row < cm->mi_rows; row++) {
MODE_INFO **mi_8x8 = mi_8x8_ptr;
uint8_t *cache = cache_ptr;
for (col = 0; col < cm->mi_cols; col++, mi_8x8++, cache++)
cache[0] = mi_8x8[0]->segment_id;
mi_8x8_ptr += cm->mi_stride;
cache_ptr += cm->mi_cols;
}
}
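// Allocate the lookahead (lag) buffers and the altref temporal filter output
// buffer.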
static void alloc_raw_frame_buffers(VP9_COMP *cpi) {
VP9_COMMON *cm = &cpi->common;
const VP9EncoderConfig *oxcf = &cpi->oxcf;
if (!cpi->lookahead)
cpi->lookahead = vp9_lookahead_init(oxcf->width, oxcf->height,
cm->subsampling_x, cm->subsampling_y,
#if CONFIG_VP9_HIGHBITDEPTH
cm->use_highbitdepth,
#endif
oxcf->lag_in_frames);
if (!cpi->lookahead)
vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
"Failed to allocate lag buffers");
  // TODO(agrange): Check whether ARF is enabled and skip allocation if not.
if (vpx_realloc_frame_buffer(&cpi->alt_ref_buffer, oxcf->width, oxcf->height,
cm->subsampling_x, cm->subsampling_y,
#if CONFIG_VP9_HIGHBITDEPTH
cm->use_highbitdepth,
#endif
VP9_ENC_BORDER_IN_PIXELS, cm->byte_alignment,
NULL, NULL, NULL))
vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
"Failed to allocate altref buffer");
}
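// (Re)allocate the utility frame buffers (last frame for loop filter
// picking, scaled source/last source, SVC intermediate and optional denoise
// buffers) at the current frame size.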
static void alloc_util_frame_buffers(VP9_COMP *cpi) {
VP9_COMMON *const cm = &cpi->common;
if (vpx_realloc_frame_buffer(&cpi->last_frame_uf, cm->width, cm->height,
cm->subsampling_x, cm->subsampling_y,
#if CONFIG_VP9_HIGHBITDEPTH
cm->use_highbitdepth,
#endif
VP9_ENC_BORDER_IN_PIXELS, cm->byte_alignment,
NULL, NULL, NULL))
vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
"Failed to allocate last frame buffer");
if (vpx_realloc_frame_buffer(&cpi->scaled_source, cm->width, cm->height,
cm->subsampling_x, cm->subsampling_y,
#if CONFIG_VP9_HIGHBITDEPTH
cm->use_highbitdepth,
#endif
VP9_ENC_BORDER_IN_PIXELS, cm->byte_alignment,
NULL, NULL, NULL))
vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
"Failed to allocate scaled source buffer");
  // For 1-pass CBR: allocate scaled_temp, which may be used as an
  // intermediate buffer for 2-stage down-sampling (two 1:2 stages for a
  // 1/4x1/4 target); number_spatial_layers must be greater than 2.
if (is_one_pass_cbr_svc(cpi) && !cpi->svc.scaled_temp_is_alloc &&
cpi->svc.number_spatial_layers > 2) {
cpi->svc.scaled_temp_is_alloc = 1;
if (vpx_realloc_frame_buffer(
&cpi->svc.scaled_temp, cm->width >> 1, cm->height >> 1,
cm->subsampling_x, cm->subsampling_y,
#if CONFIG_VP9_HIGHBITDEPTH
cm->use_highbitdepth,
#endif
VP9_ENC_BORDER_IN_PIXELS, cm->byte_alignment, NULL, NULL, NULL))
vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,
"Failed to allocate scaled_frame for svc ");
}
if (vpx_realloc_frame_buffer(&cpi->scaled_last_source, cm->width, cm->height,
cm->subsampling_x, cm->subsampling_y,
#if CONFIG_VP9_HIGHBITDEPTH
cm->use_highbitdepth,
#endif
VP9_ENC_BORDER_IN_PIXELS, cm->byte_alignment,
NULL, NULL, NULL))
vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
"Failed to allocate scaled last source buffer");
#ifdef ENABLE_KF_DENOISE
if (vpx_realloc_frame_buffer(&cpi->raw_unscaled_source, cm->width, cm->height,
cm->subsampling_x, cm->subsampling_y,
#if CONFIG_VP9_HIGHBITDEPTH
cm->use_highbitdepth,
#endif
VP9_ENC_BORDER_IN_PIXELS, cm->byte_alignment,
NULL, NULL, NULL))
vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
"Failed to allocate unscaled raw source frame buffer");
if (vpx_realloc_frame_buffer(&cpi->raw_scaled_source, cm->width, cm->height,
cm->subsampling_x, cm->subsampling_y,
#if CONFIG_VP9_HIGHBITDEPTH
cm->use_highbitdepth,
#endif
VP9_ENC_BORDER_IN_PIXELS, cm->byte_alignment,
NULL, NULL, NULL))
vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
"Failed to allocate scaled raw source frame buffer");
#endif
}
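// Allocate the per-mi extended mode info array. Returns 1 on failure.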
static int alloc_context_buffers_ext(VP9_COMP *cpi) {
VP9_COMMON *cm = &cpi->common;
int mi_size = cm->mi_cols * cm->mi_rows;
cpi->mbmi_ext_base = vpx_calloc(mi_size, sizeof(*cpi->mbmi_ext_base));
if (!cpi->mbmi_ext_base) return 1;
return 0;
}
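// Allocate frame-level compressor buffers: context buffers, token storage,
// per-superblock-row token lists and the partition search tree.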
static void alloc_compressor_data(VP9_COMP *cpi) {
VP9_COMMON *cm = &cpi->common;
int sb_rows;
vp9_alloc_context_buffers(cm, cm->width, cm->height);
alloc_context_buffers_ext(cpi);
vpx_free(cpi->tile_tok[0][0]);
{
unsigned int tokens = get_token_alloc(cm->mb_rows, cm->mb_cols);
CHECK_MEM_ERROR(cm, cpi->tile_tok[0][0],
vpx_calloc(tokens, sizeof(*cpi->tile_tok[0][0])));
}
sb_rows = mi_cols_aligned_to_sb(cm->mi_rows) >> MI_BLOCK_SIZE_LOG2;
vpx_free(cpi->tplist[0][0]);
CHECK_MEM_ERROR(
cm, cpi->tplist[0][0],
vpx_calloc(sb_rows * 4 * (1 << 6), sizeof(*cpi->tplist[0][0])));
vp9_setup_pc_tree(&cpi->common, &cpi->td);
}
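// Update the target framerate, treating values below 0.1 fps as invalid and
// substituting 30 fps.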
void vp9_new_framerate(VP9_COMP *cpi, double framerate) {
cpi->framerate = framerate < 0.1 ? 30 : framerate;
vp9_rc_update_framerate(cpi);
}
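// Clamp the requested tile configuration to the limits implied by the frame
// size and, when targeting a level automatically, by the target level.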
static void set_tile_limits(VP9_COMP *cpi) {
VP9_COMMON *const cm = &cpi->common;
int min_log2_tile_cols, max_log2_tile_cols;
vp9_get_tile_n_bits(cm->mi_cols, &min_log2_tile_cols, &max_log2_tile_cols);
cm->log2_tile_cols =
clamp(cpi->oxcf.tile_columns, min_log2_tile_cols, max_log2_tile_cols);
cm->log2_tile_rows = cpi->oxcf.tile_rows;
if (cpi->oxcf.target_level == LEVEL_AUTO) {
const int level_tile_cols =
log_tile_cols_from_picsize_level(cpi->common.width, cpi->common.height);
if (cm->log2_tile_cols > level_tile_cols) {
cm->log2_tile_cols = VPXMAX(level_tile_cols, min_log2_tile_cols);
}
}
}
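// Re-derive mi dimensions and reinitialize per-frame context after a frame
// size change.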
static void update_frame_size(VP9_COMP *cpi) {
VP9_COMMON *const cm = &cpi->common;
MACROBLOCKD *const xd = &cpi->td.mb.e_mbd;
vp9_set_mb_mi(cm, cm->width, cm->height);
vp9_init_context_buffers(cm);
vp9_init_macroblockd(cm, xd, NULL);
cpi->td.mb.mbmi_ext_base = cpi->mbmi_ext_base;
memset(cpi->mbmi_ext_base, 0,
cm->mi_rows * cm->mi_cols * sizeof(*cpi->mbmi_ext_base));
set_tile_limits(cpi);
}
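// Map LAST/GOLDEN/ALTREF to their initial reference frame buffer indices.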
static void init_buffer_indices(VP9_COMP *cpi) {
int ref_frame;
for (ref_frame = 0; ref_frame < REF_FRAMES; ++ref_frame)
cpi->ref_fb_idx[ref_frame] = ref_frame;
cpi->lst_fb_idx = cpi->ref_fb_idx[LAST_FRAME - 1];
cpi->gld_fb_idx = cpi->ref_fb_idx[GOLDEN_FRAME - 1];
cpi->alt_fb_idx = cpi->ref_fb_idx[ALTREF_FRAME - 1];
}
static void init_level_constraint(LevelConstraint *lc) {
lc->level_index = -1;
lc->max_cpb_size = INT_MAX;
lc->max_frame_size = INT_MAX;
lc->fail_flag = 0;
}
static void set_level_constraint(LevelConstraint *ls, int8_t level_index) {
vpx_clear_system_state();
ls->level_index = level_index;
if (level_index >= 0) {
ls->max_cpb_size = vp9_level_defs[level_index].max_cpb_size * (double)1000;
}
}
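// One-time initialization of the compressor from the encoder configuration;
// settings that may change between frames are handled in vp9_change_config.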
static void init_config(struct VP9_COMP *cpi, const VP9EncoderConfig *oxcf) {
VP9_COMMON *const cm = &cpi->common;
cpi->oxcf = *oxcf;
cpi->framerate = oxcf->init_framerate;
cm->profile = oxcf->profile;
cm->bit_depth = oxcf->bit_depth;
#if CONFIG_VP9_HIGHBITDEPTH
cm->use_highbitdepth = oxcf->use_highbitdepth;
#endif
cm->color_space = oxcf->color_space;
cm->color_range = oxcf->color_range;
cpi->target_level = oxcf->target_level;
cpi->keep_level_stats = oxcf->target_level != LEVEL_MAX;
set_level_constraint(&cpi->level_constraint,
get_level_index(cpi->target_level));
cm->width = oxcf->width;
cm->height = oxcf->height;
alloc_compressor_data(cpi);
cpi->svc.temporal_layering_mode = oxcf->temporal_layering_mode;
// Single thread case: use counts in common.
cpi->td.counts = &cm->counts;
// Spatial scalability.
cpi->svc.number_spatial_layers = oxcf->ss_number_layers;
// Temporal scalability.
cpi->svc.number_temporal_layers = oxcf->ts_number_layers;
if ((cpi->svc.number_temporal_layers > 1 && cpi->oxcf.rc_mode == VPX_CBR) ||
((cpi->svc.number_temporal_layers > 1 ||
cpi->svc.number_spatial_layers > 1) &&
cpi->oxcf.pass != 1)) {
vp9_init_layer_context(cpi);
}
  // vp9_change_config() handles all remaining (joint) configuration.
vp9_change_config(cpi, oxcf);
cpi->static_mb_pct = 0;
cpi->ref_frame_flags = 0;
init_buffer_indices(cpi);
vp9_noise_estimate_init(&cpi->noise_estimate, cm->width, cm->height);
}
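// Reset rate control when the average frame bandwidth grows by more than 50%
// or drops below half of its previous value (SVC uses a per-layer check).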
void vp9_check_reset_rc_flag(VP9_COMP *cpi) {
RATE_CONTROL *rc = &cpi->rc;
if (cpi->common.current_video_frame >
(unsigned int)cpi->svc.number_spatial_layers) {
if (cpi->use_svc) {
vp9_svc_check_reset_layer_rc_flag(cpi);
} else {
if (rc->avg_frame_bandwidth > (3 * rc->last_avg_frame_bandwidth >> 1) ||
rc->avg_frame_bandwidth < (rc->last_avg_frame_bandwidth >> 1)) {
rc->rc_1_frame = 0;
rc->rc_2_frame = 0;
rc->bits_off_target = rc->optimal_buffer_level;
rc->buffer_level = rc->optimal_buffer_level;
}
}
}
}
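// Convert the configured buffer levels from milliseconds to bits; the
// optimal and maximum levels default to one eighth of the target bandwidth.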
void vp9_set_rc_buffer_sizes(VP9_COMP *cpi) {
RATE_CONTROL *rc = &cpi->rc;
const VP9EncoderConfig *oxcf = &cpi->oxcf;
const int64_t bandwidth = oxcf->target_bandwidth;
const int64_t starting = oxcf->starting_buffer_level_ms;
const int64_t optimal = oxcf->optimal_buffer_level_ms;
const int64_t maximum = oxcf->maximum_buffer_size_ms;
rc->starting_buffer_level = starting * bandwidth / 1000;
rc->optimal_buffer_level =
(optimal == 0) ? bandwidth / 8 : optimal * bandwidth / 1000;
rc->maximum_buffer_size =
(maximum == 0) ? bandwidth / 8 : maximum * bandwidth / 1000;
// Under a configuration change, where maximum_buffer_size may change,
// keep buffer level clipped to the maximum allowed buffer size.
rc->bits_off_target = VPXMIN(rc->bits_off_target, rc->maximum_buffer_size);
rc->buffer_level = VPXMIN(rc->buffer_level, rc->maximum_buffer_size);
}
#if CONFIG_VP9_HIGHBITDEPTH
// TODO(angiebird): make sdx8f available for highbitdepth if needed
#define HIGHBD_BFP(BT, SDF, SDAF, VF, SVF, SVAF, SDX4DF) \
cpi->fn_ptr[BT].sdf = SDF; \
cpi->fn_ptr[BT].sdaf = SDAF; \
cpi->fn_ptr[BT].vf = VF; \
cpi->fn_ptr[BT].svf = SVF; \
cpi->fn_ptr[BT].svaf = SVAF; \
cpi->fn_ptr[BT].sdx4df = SDX4DF; \
cpi->fn_ptr[BT].sdx8f = NULL;
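// The _bits10/_bits12 wrappers below scale the high bitdepth SAD results
// back to an 8-bit-equivalent range (right shift by 2 or 4, respectively).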
#define MAKE_BFP_SAD_WRAPPER(fnname) \
static unsigned int fnname##_bits8(const uint8_t *src_ptr, \
int source_stride, \
const uint8_t *ref_ptr, int ref_stride) { \
return fnname(src_ptr, source_stride, ref_ptr, ref_stride); \
} \
static unsigned int fnname##_bits10( \
const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, \
int ref_stride) { \
return fnname(src_ptr, source_stride, ref_ptr, ref_stride) >> 2; \
} \
static unsigned int fnname##_bits12( \
const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, \
int ref_stride) { \
return fnname(src_ptr, source_stride, ref_ptr, ref_stride) >> 4; \
}
#define MAKE_BFP_SADAVG_WRAPPER(fnname) \
static unsigned int fnname##_bits8( \
const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, \
int ref_stride, const uint8_t *second_pred) { \
return fnname(src_ptr, source_stride, ref_ptr, ref_stride, second_pred); \
} \
static unsigned int fnname##_bits10( \
const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, \
int ref_stride, const uint8_t *second_pred) { \
return fnname(src_ptr, source_stride, ref_ptr, ref_stride, second_pred) >> \
2; \
} \
static unsigned int fnname##_bits12( \
const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, \
int ref_stride, const uint8_t *second_pred) { \
return fnname(src_ptr, source_stride, ref_ptr, ref_stride, second_pred) >> \
4; \
}
#define MAKE_BFP_SAD4D_WRAPPER(fnname) \
static void fnname##_bits8(const uint8_t *src_ptr, int source_stride, \
const uint8_t *const ref_ptr[], int ref_stride, \
unsigned int *sad_array) { \
fnname(src_ptr, source_stride, ref_ptr, ref_stride, sad_array); \
} \
static void fnname##_bits10(const uint8_t *src_ptr, int source_stride, \
const uint8_t *const ref_ptr[], int ref_stride, \
unsigned int *sad_array) { \
int i; \
fnname(src_ptr, source_stride, ref_ptr, ref_stride, sad_array); \
for (i = 0; i < 4; i++) sad_array[i] >>= 2; \
} \
static void fnname##_bits12(const uint8_t *src_ptr, int source_stride, \
const uint8_t *const ref_ptr[], int ref_stride, \
unsigned int *sad_array) { \
int i; \
fnname(src_ptr, source_stride, ref_ptr, ref_stride, sad_array); \
for (i = 0; i < 4; i++) sad_array[i] >>= 4; \
}
MAKE_BFP_SAD_WRAPPER(vpx_highbd_sad32x16)
MAKE_BFP_SADAVG_WRAPPER(vpx_highbd_sad32x16_avg)
MAKE_BFP_SAD4D_WRAPPER(vpx_highbd_sad32x16x4d)
MAKE_BFP_SAD_WRAPPER(vpx_highbd_sad16x32)
MAKE_BFP_SADAVG_WRAPPER(vpx_highbd_sad16x32_avg)
MAKE_BFP_SAD4D_WRAPPER(vpx_highbd_sad16x32x4d)
MAKE_BFP_SAD_WRAPPER(vpx_highbd_sad64x32)
MAKE_BFP_SADAVG_WRAPPER(vpx_highbd_sad64x32_avg)
MAKE_BFP_SAD4D_WRAPPER(vpx_highbd_sad64x32x4d)
MAKE_BFP_SAD_WRAPPER(vpx_highbd_sad32x64)
MAKE_BFP_SADAVG_WRAPPER(vpx_highbd_sad32x64_avg)
MAKE_BFP_SAD4D_WRAPPER(vpx_highbd_sad32x64x4d)
MAKE_BFP_SAD_WRAPPER(vpx_highbd_sad32x32)
MAKE_BFP_SADAVG_WRAPPER(vpx_highbd_sad32x32_avg)
MAKE_BFP_SAD4D_WRAPPER(vpx_highbd_sad32x32x4d)
MAKE_BFP_SAD_WRAPPER(vpx_highbd_sad64x64)
MAKE_BFP_SADAVG_WRAPPER(vpx_highbd_sad64x64_avg)
MAKE_BFP_SAD4D_WRAPPER(vpx_highbd_sad64x64x4d)
MAKE_BFP_SAD_WRAPPER(vpx_highbd_sad16x16)
MAKE_BFP_SADAVG_WRAPPER(vpx_highbd_sad16x16_avg)
MAKE_BFP_SAD4D_WRAPPER(vpx_highbd_sad16x16x4d)
MAKE_BFP_SAD_WRAPPER(vpx_highbd_sad16x8)
MAKE_BFP_SADAVG_WRAPPER(vpx_highbd_sad16x8_avg)
MAKE_BFP_SAD4D_WRAPPER(vpx_highbd_sad16x8x4d)
MAKE_BFP_SAD_WRAPPER(vpx_highbd_sad8x16)
MAKE_BFP_SADAVG_WRAPPER(vpx_highbd_sad8x16_avg)
MAKE_BFP_SAD4D_WRAPPER(vpx_highbd_sad8x16x4d)
MAKE_BFP_SAD_WRAPPER(vpx_highbd_sad8x8)
MAKE_BFP_SADAVG_WRAPPER(vpx_highbd_sad8x8_avg)
MAKE_BFP_SAD4D_WRAPPER(vpx_highbd_sad8x8x4d)
MAKE_BFP_SAD_WRAPPER(vpx_highbd_sad8x4)
MAKE_BFP_SADAVG_WRAPPER(vpx_highbd_sad8x4_avg)
MAKE_BFP_SAD4D_WRAPPER(vpx_highbd_sad8x4x4d)
MAKE_BFP_SAD_WRAPPER(vpx_highbd_sad4x8)
MAKE_BFP_SADAVG_WRAPPER(vpx_highbd_sad4x8_avg)
MAKE_BFP_SAD4D_WRAPPER(vpx_highbd_sad4x8x4d)
MAKE_BFP_SAD_WRAPPER(vpx_highbd_sad4x4)
MAKE_BFP_SADAVG_WRAPPER(vpx_highbd_sad4x4_avg)
MAKE_BFP_SAD4D_WRAPPER(vpx_highbd_sad4x4x4d)
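// Install the bit-depth specific SAD/variance function pointers used by
// motion search.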
static void highbd_set_var_fns(VP9_COMP *const cpi) {
VP9_COMMON *const cm = &cpi->common;
if (cm->use_highbitdepth) {
switch (cm->bit_depth) {
case VPX_BITS_8:
HIGHBD_BFP(BLOCK_32X16, vpx_highbd_sad32x16_bits8,
vpx_highbd_sad32x16_avg_bits8, vpx_highbd_8_variance32x16,
vpx_highbd_8_sub_pixel_variance32x16,
vpx_highbd_8_sub_pixel_avg_variance32x16,
vpx_highbd_sad32x16x4d_bits8)
HIGHBD_BFP(BLOCK_16X32, vpx_highbd_sad16x32_bits8,
vpx_highbd_sad16x32_avg_bits8, vpx_highbd_8_variance16x32,
vpx_highbd_8_sub_pixel_variance16x32,
vpx_highbd_8_sub_pixel_avg_variance16x32,
vpx_highbd_sad16x32x4d_bits8)
HIGHBD_BFP(BLOCK_64X32, vpx_highbd_sad64x32_bits8,
vpx_highbd_sad64x32_avg_bits8, vpx_highbd_8_variance64x32,
vpx_highbd_8_sub_pixel_variance64x32,
vpx_highbd_8_sub_pixel_avg_variance64x32,
vpx_highbd_sad64x32x4d_bits8)
HIGHBD_BFP(BLOCK_32X64, vpx_highbd_sad32x64_bits8,
vpx_highbd_sad32x64_avg_bits8, vpx_highbd_8_variance32x64,
vpx_highbd_8_sub_pixel_variance32x64,
vpx_highbd_8_sub_pixel_avg_variance32x64,
vpx_highbd_sad32x64x4d_bits8)
HIGHBD_BFP(BLOCK_32X32, vpx_highbd_sad32x32_bits8,
vpx_highbd_sad32x32_avg_bits8, vpx_highbd_8_variance32x32,
vpx_highbd_8_sub_pixel_variance32x32,
vpx_highbd_8_sub_pixel_avg_variance32x32,
vpx_highbd_sad32x32x4d_bits8)
HIGHBD_BFP(BLOCK_64X64, vpx_highbd_sad64x64_bits8,
vpx_highbd_sad64x64_avg_bits8, vpx_highbd_8_variance64x64,
vpx_highbd_8_sub_pixel_variance64x64,
vpx_highbd_8_sub_pixel_avg_variance64x64,
vpx_highbd_sad64x64x4d_bits8)
HIGHBD_BFP(BLOCK_16X16, vpx_highbd_sad16x16_bits8,
vpx_highbd_sad16x16_avg_bits8, vpx_highbd_8_variance16x16,
vpx_highbd_8_sub_pixel_variance16x16,
vpx_highbd_8_sub_pixel_avg_variance16x16,
vpx_highbd_sad16x16x4d_bits8)
HIGHBD_BFP(BLOCK_16X8, vpx_highbd_sad16x8_bits8,
vpx_highbd_sad16x8_avg_bits8, vpx_highbd_8_variance16x8,
vpx_highbd_8_sub_pixel_variance16x8,
vpx_highbd_8_sub_pixel_avg_variance16x8,
vpx_highbd_sad16x8x4d_bits8)
HIGHBD_BFP(BLOCK_8X16, vpx_highbd_sad8x16_bits8,
vpx_highbd_sad8x16_avg_bits8, vpx_highbd_8_variance8x16,
vpx_highbd_8_sub_pixel_variance8x16,
vpx_highbd_8_sub_pixel_avg_variance8x16,
vpx_highbd_sad8x16x4d_bits8)
HIGHBD_BFP(
BLOCK_8X8, vpx_highbd_sad8x8_bits8, vpx_highbd_sad8x8_avg_bits8,
vpx_highbd_8_variance8x8, vpx_highbd_8_sub_pixel_variance8x8,
vpx_highbd_8_sub_pixel_avg_variance8x8, vpx_highbd_sad8x8x4d_bits8)
HIGHBD_BFP(
BLOCK_8X4, vpx_highbd_sad8x4_bits8, vpx_highbd_sad8x4_avg_bits8,
vpx_highbd_8_variance8x4, vpx_highbd_8_sub_pixel_variance8x4,
vpx_highbd_8_sub_pixel_avg_variance8x4, vpx_highbd_sad8x4x4d_bits8)
HIGHBD_BFP(
BLOCK_4X8, vpx_highbd_sad4x8_bits8, vpx_highbd_sad4x8_avg_bits8,
vpx_highbd_8_variance4x8, vpx_highbd_8_sub_pixel_variance4x8,
vpx_highbd_8_sub_pixel_avg_variance4x8, vpx_highbd_sad4x8x4d_bits8)
HIGHBD_BFP(
BLOCK_4X4, vpx_highbd_sad4x4_bits8, vpx_highbd_sad4x4_avg_bits8,
vpx_highbd_8_variance4x4, vpx_highbd_8_sub_pixel_variance4x4,
vpx_highbd_8_sub_pixel_avg_variance4x4, vpx_highbd_sad4x4x4d_bits8)
break;
case VPX_BITS_10:
HIGHBD_BFP(BLOCK_32X16, vpx_highbd_sad32x16_bits10,
vpx_highbd_sad32x16_avg_bits10, vpx_highbd_10_variance32x16,
vpx_highbd_10_sub_pixel_variance32x16,
vpx_highbd_10_sub_pixel_avg_variance32x16,
vpx_highbd_sad32x16x4d_bits10)
HIGHBD_BFP(BLOCK_16X32, vpx_highbd_sad16x32_bits10,
vpx_highbd_sad16x32_avg_bits10, vpx_highbd_10_variance16x32,
vpx_highbd_10_sub_pixel_variance16x32,
vpx_highbd_10_sub_pixel_avg_variance16x32,
vpx_highbd_sad16x32x4d_bits10)
HIGHBD_BFP(BLOCK_64X32, vpx_highbd_sad64x32_bits10,
vpx_highbd_sad64x32_avg_bits10, vpx_highbd_10_variance64x32,
vpx_highbd_10_sub_pixel_variance64x32,
vpx_highbd_10_sub_pixel_avg_variance64x32,
vpx_highbd_sad64x32x4d_bits10)
HIGHBD_BFP(BLOCK_32X64, vpx_highbd_sad32x64_bits10,
vpx_highbd_sad32x64_avg_bits10, vpx_highbd_10_variance32x64,
vpx_highbd_10_sub_pixel_variance32x64,
vpx_highbd_10_sub_pixel_avg_variance32x64,
vpx_highbd_sad32x64x4d_bits10)
HIGHBD_BFP(BLOCK_32X32, vpx_highbd_sad32x32_bits10,
vpx_highbd_sad32x32_avg_bits10, vpx_highbd_10_variance32x32,
vpx_highbd_10_sub_pixel_variance32x32,
vpx_highbd_10_sub_pixel_avg_variance32x32,
vpx_highbd_sad32x32x4d_bits10)
HIGHBD_BFP(BLOCK_64X64, vpx_highbd_sad64x64_bits10,
vpx_highbd_sad64x64_avg_bits10, vpx_highbd_10_variance64x64,
vpx_highbd_10_sub_pixel_variance64x64,
vpx_highbd_10_sub_pixel_avg_variance64x64,
vpx_highbd_sad64x64x4d_bits10)
HIGHBD_BFP(BLOCK_16X16, vpx_highbd_sad16x16_bits10,
vpx_highbd_sad16x16_avg_bits10, vpx_highbd_10_variance16x16,
vpx_highbd_10_sub_pixel_variance16x16,
vpx_highbd_10_sub_pixel_avg_variance16x16,
vpx_highbd_sad16x16x4d_bits10)
HIGHBD_BFP(BLOCK_16X8, vpx_highbd_sad16x8_bits10,
vpx_highbd_sad16x8_avg_bits10, vpx_highbd_10_variance16x8,
vpx_highbd_10_sub_pixel_variance16x8,
vpx_highbd_10_sub_pixel_avg_variance16x8,
vpx_highbd_sad16x8x4d_bits10)
HIGHBD_BFP(BLOCK_8X16, vpx_highbd_sad8x16_bits10,
vpx_highbd_sad8x16_avg_bits10, vpx_highbd_10_variance8x16,
vpx_highbd_10_sub_pixel_variance8x16,
vpx_highbd_10_sub_pixel_avg_variance8x16,
vpx_highbd_sad8x16x4d_bits10)
HIGHBD_BFP(BLOCK_8X8, vpx_highbd_sad8x8_bits10,
vpx_highbd_sad8x8_avg_bits10, vpx_highbd_10_variance8x8,
vpx_highbd_10_sub_pixel_variance8x8,
vpx_highbd_10_sub_pixel_avg_variance8x8,
vpx_highbd_sad8x8x4d_bits10)
HIGHBD_BFP(BLOCK_8X4, vpx_highbd_sad8x4_bits10,
vpx_highbd_sad8x4_avg_bits10, vpx_highbd_10_variance8x4,
vpx_highbd_10_sub_pixel_variance8x4,
vpx_highbd_10_sub_pixel_avg_variance8x4,
vpx_highbd_sad8x4x4d_bits10)
HIGHBD_BFP(BLOCK_4X8, vpx_highbd_sad4x8_bits10,
vpx_highbd_sad4x8_avg_bits10, vpx_highbd_10_variance4x8,
vpx_highbd_10_sub_pixel_variance4x8,
vpx_highbd_10_sub_pixel_avg_variance4x8,
vpx_highbd_sad4x8x4d_bits10)
HIGHBD_BFP(BLOCK_4X4, vpx_highbd_sad4x4_bits10,
vpx_highbd_sad4x4_avg_bits10, vpx_highbd_10_variance4x4,
vpx_highbd_10_sub_pixel_variance4x4,
vpx_highbd_10_sub_pixel_avg_variance4x4,
vpx_highbd_sad4x4x4d_bits10)
break;
default:
assert(cm->bit_depth == VPX_BITS_12);
HIGHBD_BFP(BLOCK_32X16, vpx_highbd_sad32x16_bits12,
vpx_highbd_sad32x16_avg_bits12, vpx_highbd_12_variance32x16,
vpx_highbd_12_sub_pixel_variance32x16,
vpx_highbd_12_sub_pixel_avg_variance32x16,
vpx_highbd_sad32x16x4d_bits12)
HIGHBD_BFP(BLOCK_16X32, vpx_highbd_sad16x32_bits12,
vpx_highbd_sad16x32_avg_bits12, vpx_highbd_12_variance16x32,
vpx_highbd_12_sub_pixel_variance16x32,
vpx_highbd_12_sub_pixel_avg_variance16x32,
vpx_highbd_sad16x32x4d_bits12)
HIGHBD_BFP(BLOCK_64X32, vpx_highbd_sad64x32_bits12,
vpx_highbd_sad64x32_avg_bits12, vpx_highbd_12_variance64x32,
vpx_highbd_12_sub_pixel_variance64x32,
vpx_highbd_12_sub_pixel_avg_variance64x32,
vpx_highbd_sad64x32x4d_bits12)
HIGHBD_BFP(BLOCK_32X64, vpx_highbd_sad32x64_bits12,
vpx_highbd_sad32x64_avg_bits12, vpx_highbd_12_variance32x64,
vpx_highbd_12_sub_pixel_variance32x64,
vpx_highbd_12_sub_pixel_avg_variance32x64,
vpx_highbd_sad32x64x4d_bits12)
HIGHBD_BFP(BLOCK_32X32, vpx_highbd_sad32x32_bits12,
vpx_highbd_sad32x32_avg_bits12, vpx_highbd_12_variance32x32,
vpx_highbd_12_sub_pixel_variance32x32,
vpx_highbd_12_sub_pixel_avg_variance32x32,
vpx_highbd_sad32x32x4d_bits12)
HIGHBD_BFP(BLOCK_64X64, vpx_highbd_sad64x64_bits12,
vpx_highbd_sad64x64_avg_bits12, vpx_highbd_12_variance64x64,
vpx_highbd_12_sub_pixel_variance64x64,
vpx_highbd_12_sub_pixel_avg_variance64x64,
vpx_highbd_sad64x64x4d_bits12)
HIGHBD_BFP(BLOCK_16X16, vpx_highbd_sad16x16_bits12,
vpx_highbd_sad16x16_avg_bits12, vpx_highbd_12_variance16x16,
vpx_highbd_12_sub_pixel_variance16x16,
vpx_highbd_12_sub_pixel_avg_variance16x16,
vpx_highbd_sad16x16x4d_bits12)
HIGHBD_BFP(BLOCK_16X8, vpx_highbd_sad16x8_bits12,
vpx_highbd_sad16x8_avg_bits12, vpx_highbd_12_variance16x8,
vpx_highbd_12_sub_pixel_variance16x8,
vpx_highbd_12_sub_pixel_avg_variance16x8,
vpx_highbd_sad16x8x4d_bits12)
HIGHBD_BFP(BLOCK_8X16, vpx_highbd_sad8x16_bits12,
vpx_highbd_sad8x16_avg_bits12, vpx_highbd_12_variance8x16,
vpx_highbd_12_sub_pixel_variance8x16,
vpx_highbd_12_sub_pixel_avg_variance8x16,
vpx_highbd_sad8x16x4d_bits12)
HIGHBD_BFP(BLOCK_8X8, vpx_highbd_sad8x8_bits12,
vpx_highbd_sad8x8_avg_bits12, vpx_highbd_12_variance8x8,
vpx_highbd_12_sub_pixel_variance8x8,
vpx_highbd_12_sub_pixel_avg_variance8x8,
vpx_highbd_sad8x8x4d_bits12)
HIGHBD_BFP(BLOCK_8X4, vpx_highbd_sad8x4_bits12,
vpx_highbd_sad8x4_avg_bits12, vpx_highbd_12_variance8x4,
vpx_highbd_12_sub_pixel_variance8x4,
vpx_highbd_12_sub_pixel_avg_variance8x4,
vpx_highbd_sad8x4x4d_bits12)
HIGHBD_BFP(BLOCK_4X8, vpx_highbd_sad4x8_bits12,
vpx_highbd_sad4x8_avg_bits12, vpx_highbd_12_variance4x8,
vpx_highbd_12_sub_pixel_variance4x8,
vpx_highbd_12_sub_pixel_avg_variance4x8,
vpx_highbd_sad4x8x4d_bits12)
HIGHBD_BFP(BLOCK_4X4, vpx_highbd_sad4x4_bits12,
vpx_highbd_sad4x4_avg_bits12, vpx_highbd_12_variance4x4,
vpx_highbd_12_sub_pixel_variance4x4,
vpx_highbd_12_sub_pixel_avg_variance4x4,
vpx_highbd_sad4x4x4d_bits12)
break;
}
}
}
#endif // CONFIG_VP9_HIGHBITDEPTH
static void realloc_segmentation_maps(VP9_COMP *cpi) {
VP9_COMMON *const cm = &cpi->common;
// Create the encoder segmentation map and set all entries to 0
vpx_free(cpi->segmentation_map);
CHECK_MEM_ERROR(cm, cpi->segmentation_map,
vpx_calloc(cm->mi_rows * cm->mi_cols, 1));
// Create a map used for cyclic background refresh.
if (cpi->cyclic_refresh) vp9_cyclic_refresh_free(cpi->cyclic_refresh);
CHECK_MEM_ERROR(cm, cpi->cyclic_refresh,
vp9_cyclic_refresh_alloc(cm->mi_rows, cm->mi_cols));
// Create a map used to mark inactive areas.
vpx_free(cpi->active_map.map);
CHECK_MEM_ERROR(cm, cpi->active_map.map,
vpx_calloc(cm->mi_rows * cm->mi_cols, 1));
// And a placeholder buffer in the coding context for use if we want to
// save and restore the segmentation map.
vpx_free(cpi->coding_context.last_frame_seg_map_copy);
CHECK_MEM_ERROR(cm, cpi->coding_context.last_frame_seg_map_copy,
vpx_calloc(cm->mi_rows * cm->mi_cols, 1));
}
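// Lazily allocates the buffers that carry the previous frame's partition,
// segment ids, low-variance flags and copy counts across frames.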
static void alloc_copy_partition_data(VP9_COMP *cpi) {
VP9_COMMON *const cm = &cpi->common;
if (cpi->prev_partition == NULL) {
CHECK_MEM_ERROR(cm, cpi->prev_partition,
(BLOCK_SIZE *)vpx_calloc(cm->mi_stride * cm->mi_rows,
sizeof(*cpi->prev_partition)));
}
if (cpi->prev_segment_id == NULL) {
CHECK_MEM_ERROR(
cm, cpi->prev_segment_id,
(int8_t *)vpx_calloc((cm->mi_stride >> 3) * ((cm->mi_rows >> 3) + 1),
sizeof(*cpi->prev_segment_id)));
}
if (cpi->prev_variance_low == NULL) {
CHECK_MEM_ERROR(cm, cpi->prev_variance_low,
(uint8_t *)vpx_calloc(
(cm->mi_stride >> 3) * ((cm->mi_rows >> 3) + 1) * 25,
sizeof(*cpi->prev_variance_low)));
}
if (cpi->copied_frame_cnt == NULL) {
CHECK_MEM_ERROR(
cm, cpi->copied_frame_cnt,
(uint8_t *)vpx_calloc((cm->mi_stride >> 3) * ((cm->mi_rows >> 3) + 1),
sizeof(*cpi->copied_frame_cnt)));
}
}
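// Applies a new encoder configuration to an existing compressor, resizing
// context buffers when the coded dimensions change and resetting the
// dependent rate control and layer state.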
void vp9_change_config(struct VP9_COMP *cpi, const VP9EncoderConfig *oxcf) {
VP9_COMMON *const cm = &cpi->common;
RATE_CONTROL *const rc = &cpi->rc;
int last_w = cpi->oxcf.width;
int last_h = cpi->oxcf.height;
vp9_init_quantizer(cpi);
if (cm->profile != oxcf->profile) cm->profile = oxcf->profile;
cm->bit_depth = oxcf->bit_depth;
cm->color_space = oxcf->color_space;
cm->color_range = oxcf->color_range;
cpi->target_level = oxcf->target_level;
cpi->keep_level_stats = oxcf->target_level != LEVEL_MAX;
set_level_constraint(&cpi->level_constraint,
get_level_index(cpi->target_level));
if (cm->profile <= PROFILE_1)
assert(cm->bit_depth == VPX_BITS_8);
else
assert(cm->bit_depth > VPX_BITS_8);
cpi->oxcf = *oxcf;
#if CONFIG_VP9_HIGHBITDEPTH
cpi->td.mb.e_mbd.bd = (int)cm->bit_depth;
#endif // CONFIG_VP9_HIGHBITDEPTH
if ((oxcf->pass == 0) && (oxcf->rc_mode == VPX_Q)) {
rc->baseline_gf_interval = FIXED_GF_INTERVAL;
} else {
rc->baseline_gf_interval = (MIN_GF_INTERVAL + MAX_GF_INTERVAL) / 2;
}
cpi->refresh_golden_frame = 0;
cpi->refresh_last_frame = 1;
cm->refresh_frame_context = 1;
cm->reset_frame_context = 0;
vp9_reset_segment_features(&cm->seg);
vp9_set_high_precision_mv(cpi, 0);
{
int i;
for (i = 0; i < MAX_SEGMENTS; i++)
cpi->segment_encode_breakout[i] = cpi->oxcf.encode_breakout;
}
cpi->encode_breakout = cpi->oxcf.encode_breakout;
vp9_set_rc_buffer_sizes(cpi);
// Set up frame rate and related rate control parameters.
vp9_new_framerate(cpi, cpi->framerate);
// Set absolute upper and lower quality limits
rc->worst_quality = cpi->oxcf.worst_allowed_q;
rc->best_quality = cpi->oxcf.best_allowed_q;
cm->interp_filter = cpi->sf.default_interp_filter;
if (cpi->oxcf.render_width > 0 && cpi->oxcf.render_height > 0) {
cm->render_width = cpi->oxcf.render_width;
cm->render_height = cpi->oxcf.render_height;
} else {
cm->render_width = cpi->oxcf.width;
cm->render_height = cpi->oxcf.height;
}
if (last_w != cpi->oxcf.width || last_h != cpi->oxcf.height) {
cm->width = cpi->oxcf.width;
cm->height = cpi->oxcf.height;
cpi->external_resize = 1;
}
if (cpi->initial_width) {
int new_mi_size = 0;
vp9_set_mb_mi(cm, cm->width, cm->height);
new_mi_size = cm->mi_stride * calc_mi_size(cm->mi_rows);
if (cm->mi_alloc_size < new_mi_size) {
vp9_free_context_buffers(cm);
alloc_compressor_data(cpi);
realloc_segmentation_maps(cpi);
cpi->initial_width = cpi->initial_height = 0;
cpi->external_resize = 0;
} else if (cm->mi_alloc_size == new_mi_size &&
(cpi->oxcf.width > last_w || cpi->oxcf.height > last_h)) {
vp9_alloc_loop_filter(cm);
}
}
if (cm->current_video_frame == 0 || last_w != cpi->oxcf.width ||
last_h != cpi->oxcf.height)
update_frame_size(cpi);
if (last_w != cpi->oxcf.width || last_h != cpi->oxcf.height) {
memset(cpi->consec_zero_mv, 0,
cm->mi_rows * cm->mi_cols * sizeof(*cpi->consec_zero_mv));
if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ)
vp9_cyclic_refresh_reset_resize(cpi);
rc->rc_1_frame = 0;
rc->rc_2_frame = 0;
}
if ((cpi->svc.number_temporal_layers > 1 && cpi->oxcf.rc_mode == VPX_CBR) ||
((cpi->svc.number_temporal_layers > 1 ||
cpi->svc.number_spatial_layers > 1) &&
cpi->oxcf.pass != 1)) {
vp9_update_layer_context_change_config(cpi,
(int)cpi->oxcf.target_bandwidth);
}
vp9_check_reset_rc_flag(cpi);
cpi->alt_ref_source = NULL;
rc->is_src_frame_alt_ref = 0;
#if 0
// Experimental RD Code
cpi->frame_distortion = 0;
cpi->last_frame_distortion = 0;
#endif
set_tile_limits(cpi);
cpi->ext_refresh_frame_flags_pending = 0;
cpi->ext_refresh_frame_context_pending = 0;
#if CONFIG_VP9_HIGHBITDEPTH
highbd_set_var_fns(cpi);
#endif
vp9_set_row_mt(cpi);
}
#ifndef M_LOG2_E
#define M_LOG2_E 0.693147180559945309417
#endif
#define log2f(x) (log(x) / (float)M_LOG2_E)
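// Note: when math.h does not already provide M_LOG2_E, the value defined
// above is ln(2), so log(x) / M_LOG2_E evaluates to log2(x).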
/***********************************************************************
* Read before modifying 'cal_nmvjointsadcost' or 'cal_nmvsadcosts' *
***********************************************************************
* The following 2 functions ('cal_nmvjointsadcost' and *
* 'cal_nmvsadcosts') are used to calculate cost lookup tables *
* used by 'vp9_diamond_search_sad'. The C implementation of the *
* function is generic, but the AVX intrinsics optimised version *
* relies on the following properties of the computed tables: *
* For cal_nmvjointsadcost: *
* - mvjointsadcost[1] == mvjointsadcost[2] == mvjointsadcost[3] *
* For cal_nmvsadcosts: *
* - For all i: mvsadcost[0][i] == mvsadcost[1][i] *
* (Equal costs for both components) *
* - For all i: mvsadcost[0][i] == mvsadcost[0][-i] *
* (Cost function is even) *
* If these do not hold, then the AVX optimised version of the *
* 'vp9_diamond_search_sad' function cannot be used as it is, in which *
* case you can revert to using the C function instead. *
***********************************************************************/
static void cal_nmvjointsadcost(int *mvjointsadcost) {
/*********************************************************************
* Warning: Read the comments above before modifying this function *
*********************************************************************/
mvjointsadcost[0] = 600;
mvjointsadcost[1] = 300;
mvjointsadcost[2] = 300;
mvjointsadcost[3] = 300;
}
static void cal_nmvsadcosts(int *mvsadcost[2]) {
/*********************************************************************
* Warning: Read the comments above before modifying this function *
*********************************************************************/
int i = 1;
mvsadcost[0][0] = 0;
mvsadcost[1][0] = 0;
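// Cost grows logarithmically with the MV component magnitude:
// z = 512 * (log2(8 * i) + 0.6), mirrored for -i and shared by both rows.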
do {
double z = 256 * (2 * (log2f(8 * i) + .6));
mvsadcost[0][i] = (int)z;
mvsadcost[1][i] = (int)z;
mvsadcost[0][-i] = (int)z;
mvsadcost[1][-i] = (int)z;
} while (++i <= MV_MAX);
}
static void cal_nmvsadcosts_hp(int *mvsadcost[2]) {
int i = 1;
mvsadcost[0][0] = 0;
mvsadcost[1][0] = 0;
do {
double z = 256 * (2 * (log2f(8 * i) + .6));
mvsadcost[0][i] = (int)z;
mvsadcost[1][i] = (int)z;
mvsadcost[0][-i] = (int)z;
mvsadcost[1][-i] = (int)z;
} while (++i <= MV_MAX);
}
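#if 0
/* A minimal debug sketch (not built) that checks the table properties the
 * AVX 'vp9_diamond_search_sad' path relies on, as documented above. The
 * helper name 'check_nmvsadcost_properties' is hypothetical. Assumes the
 * two pointers are already centered, i.e. valid for indices
 * -MV_MAX..MV_MAX, as set up in vp9_create_compressor(). */
static void check_nmvsadcost_properties(int *mvsadcost[2]) {
  int i;
  for (i = 0; i <= MV_MAX; ++i) {
    assert(mvsadcost[0][i] == mvsadcost[1][i]);  /* equal component costs */
    assert(mvsadcost[0][i] == mvsadcost[0][-i]); /* even cost function */
  }
}
#endif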
static void init_ref_frame_bufs(VP9_COMMON *cm) {
int i;
BufferPool *const pool = cm->buffer_pool;
cm->new_fb_idx = INVALID_IDX;
for (i = 0; i < REF_FRAMES; ++i) {
cm->ref_frame_map[i] = INVALID_IDX;
}
for (i = 0; i < FRAME_BUFFERS; ++i) {
pool->frame_bufs[i].ref_count = 0;
}
}
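// Records the initial frame geometry and reallocates utility frame buffers
// whenever the subsampling or high bit depth setting changes.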
static void update_initial_width(VP9_COMP *cpi, int use_highbitdepth,
int subsampling_x, int subsampling_y) {
VP9_COMMON *const cm = &cpi->common;
#if !CONFIG_VP9_HIGHBITDEPTH
(void)use_highbitdepth;
assert(use_highbitdepth == 0);
#endif
if (!cpi->initial_width ||
#if CONFIG_VP9_HIGHBITDEPTH
cm->use_highbitdepth != use_highbitdepth ||
#endif
cm->subsampling_x != subsampling_x ||
cm->subsampling_y != subsampling_y) {
cm->subsampling_x = subsampling_x;
cm->subsampling_y = subsampling_y;
#if CONFIG_VP9_HIGHBITDEPTH
cm->use_highbitdepth = use_highbitdepth;
#endif
alloc_util_frame_buffers(cpi);
cpi->initial_width = cm->width;
cpi->initial_height = cm->height;
cpi->initial_mbs = cm->MBs;
}
}
// TODO(angiebird): Check whether we can move this function to vpx_image.c
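// Maps a vpx image format to its chroma subsampling factors. For example,
// VPX_IMG_FMT_I420 yields subsampling_x = 1, subsampling_y = 1 (4:2:0),
// VPX_IMG_FMT_I422 yields 1 and 0 (4:2:2), and formats not listed in either
// switch, such as 4:4:4, yield 0 and 0.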
static INLINE void vpx_img_chroma_subsampling(vpx_img_fmt_t fmt,
unsigned int *subsampling_x,
unsigned int *subsampling_y) {
switch (fmt) {
case VPX_IMG_FMT_I420:
case VPX_IMG_FMT_YV12:
case VPX_IMG_FMT_I422:
case VPX_IMG_FMT_I42016:
case VPX_IMG_FMT_I42216: *subsampling_x = 1; break;
default: *subsampling_x = 0; break;
}
switch (fmt) {
case VPX_IMG_FMT_I420:
case VPX_IMG_FMT_I440:
case VPX_IMG_FMT_YV12:
case VPX_IMG_FMT_I42016:
case VPX_IMG_FMT_I44016: *subsampling_y = 1; break;
default: *subsampling_y = 0; break;
}
}
// TODO(angiebird): Check whether we can move this function to vpx_image.c
static INLINE int vpx_img_use_highbitdepth(vpx_img_fmt_t fmt) {
return fmt & VPX_IMG_FMT_HIGHBITDEPTH;
}
#if CONFIG_VP9_TEMPORAL_DENOISING
static void setup_denoiser_buffer(VP9_COMP *cpi) {
VP9_COMMON *const cm = &cpi->common;
if (cpi->oxcf.noise_sensitivity > 0 &&
!cpi->denoiser.frame_buffer_initialized) {
if (vp9_denoiser_alloc(cm, &cpi->svc, &cpi->denoiser, cpi->use_svc,
cpi->oxcf.noise_sensitivity, cm->width, cm->height,
cm->subsampling_x, cm->subsampling_y,
#if CONFIG_VP9_HIGHBITDEPTH
cm->use_highbitdepth,
#endif
VP9_ENC_BORDER_IN_PIXELS))
vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
"Failed to allocate denoiser");
}
}
#endif
void vp9_update_compressor_with_img_fmt(VP9_COMP *cpi, vpx_img_fmt_t img_fmt) {
const VP9EncoderConfig *oxcf = &cpi->oxcf;
unsigned int subsampling_x, subsampling_y;
const int use_highbitdepth = vpx_img_use_highbitdepth(img_fmt);
vpx_img_chroma_subsampling(img_fmt, &subsampling_x, &subsampling_y);
update_initial_width(cpi, use_highbitdepth, subsampling_x, subsampling_y);
#if CONFIG_VP9_TEMPORAL_DENOISING
setup_denoiser_buffer(cpi);
#endif
assert(cpi->lookahead == NULL);
cpi->lookahead = vp9_lookahead_init(oxcf->width, oxcf->height, subsampling_x,
subsampling_y,
#if CONFIG_VP9_HIGHBITDEPTH
use_highbitdepth,
#endif
oxcf->lag_in_frames);
alloc_raw_frame_buffers(cpi);
}
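// Allocates and fully initializes a compressor instance. Returns NULL if the
// top-level allocation fails; any later allocation failure longjmps back to
// the setjmp below, which tears down the partially built encoder.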
VP9_COMP *vp9_create_compressor(const VP9EncoderConfig *oxcf,
BufferPool *const pool) {
unsigned int i;
VP9_COMP *volatile const cpi = vpx_memalign(32, sizeof(VP9_COMP));
VP9_COMMON *volatile const cm = cpi != NULL ? &cpi->common : NULL;
if (!cm) return NULL;
vp9_zero(*cpi);
if (setjmp(cm->error.jmp)) {
cm->error.setjmp = 0;
vp9_remove_compressor(cpi);
return NULL;
}
cm->error.setjmp = 1;
cm->alloc_mi = vp9_enc_alloc_mi;
cm->free_mi = vp9_enc_free_mi;
cm->setup_mi = vp9_enc_setup_mi;
CHECK_MEM_ERROR(cm, cm->fc, (FRAME_CONTEXT *)vpx_calloc(1, sizeof(*cm->fc)));
CHECK_MEM_ERROR(
cm, cm->frame_contexts,
(FRAME_CONTEXT *)vpx_calloc(FRAME_CONTEXTS, sizeof(*cm->frame_contexts)));
cpi->use_svc = 0;
cpi->resize_state = ORIG;
cpi->external_resize = 0;
cpi->resize_avg_qp = 0;
cpi->resize_buffer_underflow = 0;
cpi->use_skin_detection = 0;
cpi->common.buffer_pool = pool;
init_ref_frame_bufs(cm);
cpi->force_update_segmentation = 0;
init_config(cpi, oxcf);
cpi->frame_info = vp9_get_frame_info(oxcf);
vp9_rc_init(&cpi->oxcf, oxcf->pass, &cpi->rc);
init_frame_indexes(cm);
cpi->partition_search_skippable_frame = 0;
cpi->tile_data = NULL;
realloc_segmentation_maps(cpi);
CHECK_MEM_ERROR(
cm, cpi->skin_map,
vpx_calloc(cm->mi_rows * cm->mi_cols, sizeof(cpi->skin_map[0])));
#if !CONFIG_REALTIME_ONLY
CHECK_MEM_ERROR(cm, cpi->alt_ref_aq, vp9_alt_ref_aq_create());
#endif
CHECK_MEM_ERROR(
cm, cpi->consec_zero_mv,
vpx_calloc(cm->mi_rows * cm->mi_cols, sizeof(*cpi->consec_zero_mv)));
CHECK_MEM_ERROR(cm, cpi->nmvcosts[0],
vpx_calloc(MV_VALS, sizeof(*cpi->nmvcosts[0])));
CHECK_MEM_ERROR(cm, cpi->nmvcosts[1],
vpx_calloc(MV_VALS, sizeof(*cpi->nmvcosts[1])));
CHECK_MEM_ERROR(cm, cpi->nmvcosts_hp[0],
vpx_calloc(MV_VALS, sizeof(*cpi->nmvcosts_hp[0])));
CHECK_MEM_ERROR(cm, cpi->nmvcosts_hp[1],
vpx_calloc(MV_VALS, sizeof(*cpi->nmvcosts_hp[1])));
CHECK_MEM_ERROR(cm, cpi->nmvsadcosts[0],
vpx_calloc(MV_VALS, sizeof(*cpi->nmvsadcosts[0])));
CHECK_MEM_ERROR(cm, cpi->nmvsadcosts[1],
vpx_calloc(MV_VALS, sizeof(*cpi->nmvsadcosts[1])));
CHECK_MEM_ERROR(cm, cpi->nmvsadcosts_hp[0],
vpx_calloc(MV_VALS, sizeof(*cpi->nmvsadcosts_hp[0])));
CHECK_MEM_ERROR(cm, cpi->nmvsadcosts_hp[1],
vpx_calloc(MV_VALS, sizeof(*cpi->nmvsadcosts_hp[1])));
for (i = 0; i < (sizeof(cpi->mbgraph_stats) / sizeof(cpi->mbgraph_stats[0]));
i++) {
CHECK_MEM_ERROR(
cm, cpi->mbgraph_stats[i].mb_stats,
vpx_calloc(cm->MBs * sizeof(*cpi->mbgraph_stats[i].mb_stats), 1));
}
#if CONFIG_FP_MB_STATS
cpi->use_fp_mb_stats = 0;
if (cpi->use_fp_mb_stats) {
// A placeholder buffer used to store the macroblock stats gathered in the
// first pass.
CHECK_MEM_ERROR(cm, cpi->twopass.frame_mb_stats_buf,
vpx_calloc(cm->MBs * sizeof(uint8_t), 1));
} else {
cpi->twopass.frame_mb_stats_buf = NULL;
}
#endif
cpi->refresh_alt_ref_frame = 0;
cpi->b_calculate_psnr = CONFIG_INTERNAL_STATS;
init_level_info(&cpi->level_info);
init_level_constraint(&cpi->level_constraint);
#if CONFIG_INTERNAL_STATS
cpi->b_calculate_blockiness = 1;
cpi->b_calculate_consistency = 1;
cpi->total_inconsistency = 0;
cpi->psnr.worst = 100.0;
cpi->worst_ssim = 100.0;
cpi->count = 0;
cpi->bytes = 0;
if (cpi->b_calculate_psnr) {
cpi->total_sq_error = 0;
cpi->total_samples = 0;
cpi->totalp_sq_error = 0;
cpi->totalp_samples = 0;
cpi->tot_recode_hits = 0;
cpi->summed_quality = 0;
cpi->summed_weights = 0;
cpi->summedp_quality = 0;
cpi->summedp_weights = 0;
}
cpi->fastssim.worst = 100.0;
cpi->psnrhvs.worst = 100.0;
if (cpi->b_calculate_blockiness) {
cpi->total_blockiness = 0;
cpi->worst_blockiness = 0.0;
}
if (cpi->b_calculate_consistency) {
CHECK_MEM_ERROR(cm, cpi->ssim_vars,
vpx_calloc(cpi->common.mi_rows * cpi->common.mi_cols,
sizeof(*cpi->ssim_vars) * 4));
cpi->worst_consistency = 100.0;
} else {
cpi->ssim_vars = NULL;
}
#endif
cpi->first_time_stamp_ever = INT64_MAX;
/*********************************************************************
* Warning: Read the comments around 'cal_nmvjointsadcost' and *
* 'cal_nmvsadcosts' before modifying how these tables are computed. *
*********************************************************************/
cal_nmvjointsadcost(cpi->td.mb.nmvjointsadcost);
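// Center each cost table on MV_MAX so it can be indexed directly by signed
// MV component values in [-MV_MAX, MV_MAX].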
cpi->td.mb.nmvcost[0] = &cpi->nmvcosts[0][MV_MAX];
cpi->td.mb.nmvcost[1] = &cpi->nmvcosts[1][MV_MAX];
cpi->td.mb.nmvsadcost[0] = &cpi->nmvsadcosts[0][MV_MAX];
cpi->td.mb.nmvsadcost[1] = &cpi->nmvsadcosts[1][MV_MAX];
cal_nmvsadcosts(cpi->td.mb.nmvsadcost);
cpi->td.mb.nmvcost_hp[0] = &cpi->nmvcosts_hp[0][MV_MAX];
cpi->td.mb.nmvcost_hp[1] = &cpi->nmvcosts_hp[1][MV_MAX];
cpi->td.mb.nmvsadcost_hp[0] = &cpi->nmvsadcosts_hp[0][MV_MAX];
cpi->td.mb.nmvsadcost_hp[1] = &cpi->nmvsadcosts_hp[1][MV_MAX];
cal_nmvsadcosts_hp(cpi->td.mb.nmvsadcost_hp);
#if CONFIG_VP9_TEMPORAL_DENOISING
#ifdef OUTPUT_YUV_DENOISED
yuv_denoised_file = fopen("denoised.yuv", "ab");
#endif
#endif
#ifdef OUTPUT_YUV_SKINMAP
yuv_skinmap_file = fopen("skinmap.yuv", "wb");
#endif
#ifdef OUTPUT_YUV_REC
yuv_rec_file = fopen("rec.yuv", "wb");
#endif
#ifdef OUTPUT_YUV_SVC_SRC
yuv_svc_src[0] = fopen("svc_src_0.yuv", "wb");
yuv_svc_src[1] = fopen("svc_src_1.yuv", "wb");
yuv_svc_src[2] = fopen("svc_src_2.yuv", "wb");
#endif
#if 0
framepsnr = fopen("framepsnr.stt", "a");
kf_list = fopen("kf_list.stt", "w");
#endif
cpi->allow_encode_breakout = ENCODE_BREAKOUT_ENABLED;
#if !CONFIG_REALTIME_ONLY
if (oxcf->pass == 1) {
vp9_init_first_pass(cpi);
} else if (oxcf->pass == 2) {
const size_t packet_sz = sizeof(FIRSTPASS_STATS);
const int packets = (int)(oxcf->two_pass_stats_in.sz / packet_sz);
if (cpi->svc.number_spatial_layers > 1 ||
cpi->svc.number_temporal_layers > 1) {
FIRSTPASS_STATS *const stats = oxcf->two_pass_stats_in.buf;
FIRSTPASS_STATS *stats_copy[VPX_SS_MAX_LAYERS] = { 0 };
int i;
for (i = 0; i < oxcf->ss_number_layers; ++i) {
FIRSTPASS_STATS *const last_packet_for_layer =
&stats[packets - oxcf->ss_number_layers + i];
const int layer_id = (int)last_packet_for_layer->spatial_layer_id;
const int packets_in_layer = (int)last_packet_for_layer->count + 1;
if (layer_id >= 0 && layer_id < oxcf->ss_number_layers) {
int num_frames;
LAYER_CONTEXT *const lc = &cpi->svc.layer_context[layer_id];
vpx_free(lc->rc_twopass_stats_in.buf);
lc->rc_twopass_stats_in.sz = packets_in_layer * packet_sz;
CHECK_MEM_ERROR(cm, lc->rc_twopass_stats_in.buf,
vpx_malloc(lc->rc_twopass_stats_in.sz));
lc->twopass.stats_in_start = lc->rc_twopass_stats_in.buf;
lc->twopass.stats_in = lc->twopass.stats_in_start;
lc->twopass.stats_in_end =
lc->twopass.stats_in_start + packets_in_layer - 1;
// Note: the last packet holds the cumulative first pass stats, so the
// number of frames is the packet count minus one.
num_frames = packets_in_layer - 1;
fps_init_first_pass_info(&lc->twopass.first_pass_info,
lc->rc_twopass_stats_in.buf, num_frames);
stats_copy[layer_id] = lc->rc_twopass_stats_in.buf;
}
}
for (i = 0; i < packets; ++i) {
const int layer_id = (int)stats[i].spatial_layer_id;
if (layer_id >= 0 && layer_id < oxcf->ss_number_layers &&
stats_copy[layer_id] != NULL) {
*stats_copy[layer_id] = stats[i];
++stats_copy[layer_id];
}
}
vp9_init_second_pass_spatial_svc(cpi);
} else {
int num_frames;
#if CONFIG_FP_MB_STATS
if (cpi->use_fp_mb_stats) {
const size_t psz = cpi->common.MBs * sizeof(uint8_t);
const int ps = (int)(oxcf->firstpass_mb_stats_in.sz / psz);
cpi->twopass.firstpass_mb_stats.mb_stats_start =
oxcf->firstpass_mb_stats_in.buf;
cpi->twopass.firstpass_mb_stats.mb_stats_end =
cpi->twopass.firstpass_mb_stats.mb_stats_start +
(ps - 1) * cpi->common.MBs * sizeof(uint8_t);
}
#endif
cpi->twopass.stats_in_start = oxcf->two_pass_stats_in.buf;
cpi->twopass.stats_in = cpi->twopass.stats_in_start;
cpi->twopass.stats_in_end = &cpi->twopass.stats_in[packets - 1];
// Note: the last packet holds the cumulative first pass stats, so the
// number of frames is the packet count minus one.
num_frames = packets - 1;
fps_init_first_pass_info(&cpi->twopass.first_pass_info,
oxcf->two_pass_stats_in.buf, num_frames);
vp9_init_second_pass(cpi);
}
}
#endif // !CONFIG_REALTIME_ONLY
cpi->mb_wiener_var_cols = 0;
cpi->mb_wiener_var_rows = 0;
cpi->mb_wiener_variance = NULL;
vp9_set_speed_features_framesize_independent(cpi, oxcf->speed);
vp9_set_speed_features_framesize_dependent(cpi, oxcf->speed);
{
const int bsize = BLOCK_16X16;
const int w = num_8x8_blocks_wide_lookup[bsize];
const int h = num_8x8_blocks_high_lookup[bsize];
const int num_cols = (cm->mi_cols + w - 1) / w;
const int num_rows = (cm->mi_rows + h - 1) / h;
CHECK_MEM_ERROR(cm, cpi->mi_ssim_rdmult_scaling_factors,
vpx_calloc(num_rows * num_cols,
sizeof(*cpi->mi_ssim_rdmult_scaling_factors)));
}
cpi->kmeans_data_arr_alloc = 0;
#if CONFIG_NON_GREEDY_MV
cpi->tpl_ready = 0;
#endif // CONFIG_NON_GREEDY_MV
for (i = 0; i < MAX_ARF_GOP_SIZE; ++i) cpi->tpl_stats[i].tpl_stats_ptr = NULL;
// Allocate memory to store variances for a frame.
CHECK_MEM_ERROR(cm, cpi->source_diff_var, vpx_calloc(cm->MBs, sizeof(diff)));
cpi->source_var_thresh = 0;
cpi->frames_till_next_var_check = 0;
#define BFP(BT, SDF, SDAF, VF, SVF, SVAF, SDX4DF, SDX8F) \
cpi->fn_ptr[BT].sdf = SDF; \
cpi->fn_ptr[BT].sdaf = SDAF; \
cpi->fn_ptr[BT].vf = VF; \
cpi->fn_ptr[BT].svf = SVF; \
cpi->fn_ptr[BT].svaf = SVAF; \
cpi->fn_ptr[BT].sdx4df = SDX4DF; \
cpi->fn_ptr[BT].sdx8f = SDX8F;
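// BFP field legend (derived from the function names wired in below):
// sdf = SAD, sdaf = SAD against an averaged second predictor, vf = variance,
// svf = sub-pixel variance, svaf = sub-pixel average variance,
// sdx4df = SAD over 4 candidate blocks at once, sdx8f = SAD over 8.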
// TODO(angiebird): make sdx8f available for every block size
BFP(BLOCK_32X16, vpx_sad32x16, vpx_sad32x16_avg, vpx_variance32x16,
vpx_sub_pixel_variance32x16, vpx_sub_pixel_avg_variance32x16,
vpx_sad32x16x4d, NULL)
BFP(BLOCK_16X32, vpx_sad16x32, vpx_sad16x32_avg, vpx_variance16x32,
vpx_sub_pixel_variance16x32, vpx_sub_pixel_avg_variance16x32,
vpx_sad16x32x4d, NULL)
BFP(BLOCK_64X32, vpx_sad64x32, vpx_sad64x32_avg, vpx_variance64x32,
vpx_sub_pixel_variance64x32, vpx_sub_pixel_avg_variance64x32,
vpx_sad64x32x4d, NULL)
BFP(BLOCK_32X64, vpx_sad32x64, vpx_sad32x64_avg, vpx_variance32x64,
vpx_sub_pixel_variance32x64, vpx_sub_pixel_avg_variance32x64,
vpx_sad32x64x4d, NULL)
BFP(BLOCK_32X32, vpx_sad32x32, vpx_sad32x32_avg, vpx_variance32x32,
vpx_sub_pixel_variance32x32, vpx_sub_pixel_avg_variance32x32,
vpx_sad32x32x4d, vpx_sad32x32x8)
BFP(BLOCK_64X64, vpx_sad64x64, vpx_sad64x64_avg, vpx_variance64x64,
vpx_sub_pixel_variance64x64, vpx_sub_pixel_avg_variance64x64,
vpx_sad64x64x4d, NULL)
BFP(BLOCK_16X16, vpx_sad16x16, vpx_sad16x16_avg, vpx_variance16x16,
vpx_sub_pixel_variance16x16, vpx_sub_pixel_avg_variance16x16,
vpx_sad16x16x4d, vpx_sad16x16x8)
BFP(BLOCK_16X8, vpx_sad16x8, vpx_sad16x8_avg, vpx_variance16x8,
vpx_sub_pixel_variance16x8, vpx_sub_pixel_avg_variance16x8,
vpx_sad16x8x4d, vpx_sad16x8x8)
BFP(BLOCK_8X16, vpx_sad8x16, vpx_sad8x16_avg, vpx_variance8x16,
vpx_sub_pixel_variance8x16, vpx_sub_pixel_avg_variance8x16,
vpx_sad8x16x4d, vpx_sad8x16x8)
BFP(BLOCK_8X8, vpx_sad8x8, vpx_sad8x8_avg, vpx_variance8x8,
vpx_sub_pixel_variance8x8, vpx_sub_pixel_avg_variance8x8, vpx_sad8x8x4d,
vpx_sad8x8x8)
BFP(BLOCK_8X4, vpx_sad8x4, vpx_sad8x4_avg, vpx_variance8x4,
vpx_sub_pixel_variance8x4, vpx_sub_pixel_avg_variance8x4, vpx_sad8x4x4d,
NULL)
BFP(BLOCK_4X8, vpx_sad4x8, vpx_sad4x8_avg, vpx_variance4x8,
vpx_sub_pixel_variance4x8, vpx_sub_pixel_avg_variance4x8, vpx_sad4x8x4d,
NULL)
BFP(BLOCK_4X4, vpx_sad4x4, vpx_sad4x4_avg, vpx_variance4x4,
vpx_sub_pixel_variance4x4, vpx_sub_pixel_avg_variance4x4, vpx_sad4x4x4d,
vpx_sad4x4x8)
#if CONFIG_VP9_HIGHBITDEPTH
highbd_set_var_fns(cpi);
#endif
/* vp9_init_quantizer() is first called here. A check in
 * vp9_frame_init_quantizer() ensures vp9_init_quantizer() is only
 * called again later when needed, avoiding redundant calls on every
 * frame.
 */
vp9_init_quantizer(cpi);
vp9_loop_filter_init(cm);
// Set up the unit scaling factor used during motion search.
#if CONFIG_VP9_HIGHBITDEPTH
vp9_setup_scale_factors_for_frame(&cpi->me_sf, cm->width, cm->height,
cm->width, cm->height,
cm->use_highbitdepth);
#else
vp9_setup_scale_factors_for_frame(&cpi->me_sf, cm->width, cm->height,
cm->width, cm->height);
#endif // CONFIG_VP9_HIGHBITDEPTH
cpi->td.mb.me_sf = &cpi->me_sf;
cm->error.setjmp = 0;
#if CONFIG_RATE_CTRL
encode_command_init(&cpi->encode_command);
partition_info_init(cpi);
motion_vector_info_init(cpi);
fp_motion_vector_info_init(cpi);
#endif
return cpi;
}