Initial check-in for software m4v_h263 encoder

Change-Id: I4b49fa5c3a5e6e21cfd2419441d98dd784046367
James Dong
2010-07-12 21:46:25 -07:00
parent d6343c26b8
commit 42ef0c715d
40 changed files with 26358 additions and 0 deletions

View File

@@ -68,6 +68,7 @@ LOCAL_STATIC_LIBRARIES := \
libstagefright_avcdec \
libstagefright_avcenc \
libstagefright_m4vh263dec \
libstagefright_m4vh263enc \
libstagefright_mp3dec \
libstagefright_vorbisdec \
libstagefright_matroska \

View File

@@ -27,6 +27,7 @@
#include "include/AVCDecoder.h"
#include "include/AVCEncoder.h"
#include "include/M4vH263Decoder.h"
#include "include/M4vH263Encoder.h"
#include "include/MP3Decoder.h"
#include "include/VorbisDecoder.h"
#include "include/VPXDecoder.h"
@@ -83,6 +84,7 @@ FACTORY_CREATE_ENCODER(AMRNBEncoder)
FACTORY_CREATE_ENCODER(AMRWBEncoder)
FACTORY_CREATE_ENCODER(AACEncoder)
FACTORY_CREATE_ENCODER(AVCEncoder)
FACTORY_CREATE_ENCODER(M4vH263Encoder)
static sp<MediaSource> InstantiateSoftwareEncoder(
const char *name, const sp<MediaSource> &source,
@@ -97,6 +99,7 @@ static sp<MediaSource> InstantiateSoftwareEncoder(
FACTORY_REF(AMRWBEncoder)
FACTORY_REF(AACEncoder)
FACTORY_REF(AVCEncoder)
FACTORY_REF(M4vH263Encoder)
};
for (size_t i = 0;
i < sizeof(kFactoryInfo) / sizeof(kFactoryInfo[0]); ++i) {
@@ -181,10 +184,12 @@ static const CodecInfo kEncoderInfo[] = {
{ MEDIA_MIMETYPE_VIDEO_MPEG4, "OMX.qcom.7x30.video.encoder.mpeg4" },
{ MEDIA_MIMETYPE_VIDEO_MPEG4, "OMX.qcom.video.encoder.mpeg4" },
{ MEDIA_MIMETYPE_VIDEO_MPEG4, "OMX.TI.Video.encoder" },
{ MEDIA_MIMETYPE_VIDEO_MPEG4, "M4vH263Encoder" },
// { MEDIA_MIMETYPE_VIDEO_MPEG4, "OMX.PV.mpeg4enc" },
{ MEDIA_MIMETYPE_VIDEO_H263, "OMX.qcom.7x30.video.encoder.h263" },
{ MEDIA_MIMETYPE_VIDEO_H263, "OMX.qcom.video.encoder.h263" },
{ MEDIA_MIMETYPE_VIDEO_H263, "OMX.TI.Video.encoder" },
{ MEDIA_MIMETYPE_VIDEO_H263, "M4vH263Encoder" },
// { MEDIA_MIMETYPE_VIDEO_H263, "OMX.PV.h263enc" },
{ MEDIA_MIMETYPE_VIDEO_AVC, "OMX.qcom.7x30.video.encoder.avc" },
{ MEDIA_MIMETYPE_VIDEO_AVC, "OMX.qcom.video.encoder.avc" },

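The FACTORY_CREATE_ENCODER and FACTORY_REF macro bodies fall outside these hunks, so the registration mechanism is only implied here. As a rough sketch of what the two added lines amount to (the expansion below is an assumption based on the visible call sites, not code shown in this diff):

    // Presumed expansion of FACTORY_CREATE_ENCODER(M4vH263Encoder): a small factory
    // function that constructs the software encoder from a source and its metadata.
    static sp<MediaSource> MakeM4vH263Encoder(
            const sp<MediaSource> &source, const sp<MetaData> &meta) {
        return new M4vH263Encoder(source, meta);
    }

    // Presumed effect of FACTORY_REF(M4vH263Encoder): an entry pairing the component
    // name "M4vH263Encoder" with MakeM4vH263Encoder in kFactoryInfo, so that
    // InstantiateSoftwareEncoder() can match the name listed in kEncoderInfo and
    // construct this software encoder when no OMX component is selected.

With that in place, the kEncoderInfo additions make "M4vH263Encoder" the last-resort software choice for both MEDIA_MIMETYPE_VIDEO_MPEG4 and MEDIA_MIMETYPE_VIDEO_H263, after the hardware OMX.qcom and OMX.TI entries.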
View File

@@ -0,0 +1,37 @@
LOCAL_PATH := $(call my-dir)
include $(CLEAR_VARS)
LOCAL_SRC_FILES := \
M4vH263Encoder.cpp \
src/bitstream_io.cpp \
src/combined_encode.cpp \
src/datapart_encode.cpp \
src/dct.cpp \
src/findhalfpel.cpp \
src/fastcodemb.cpp \
src/fastidct.cpp \
src/fastquant.cpp \
src/me_utils.cpp \
src/mp4enc_api.cpp \
src/rate_control.cpp \
src/motion_est.cpp \
src/motion_comp.cpp \
src/sad.cpp \
src/sad_halfpel.cpp \
src/vlc_encode.cpp \
src/vop.cpp
LOCAL_MODULE := libstagefright_m4vh263enc
LOCAL_CFLAGS := \
-DBX_RC \
-DOSCL_IMPORT_REF= -DOSCL_UNUSED_ARG= -DOSCL_EXPORT_REF=
LOCAL_C_INCLUDES := \
$(LOCAL_PATH)/src \
$(LOCAL_PATH)/include \
$(TOP)/external/opencore/extern_libs_v2/khronos/openmax/include \
$(TOP)/frameworks/base/media/libstagefright/include
include $(BUILD_STATIC_LIBRARY)

View File

@@ -0,0 +1,359 @@
/*
* Copyright (C) 2010 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
//#define LOG_NDEBUG 0
#define LOG_TAG "M4vH263Encoder"
#include <utils/Log.h>
#include "M4vH263Encoder.h"
#include "mp4enc_api.h"
#include "OMX_Video.h"
#include <media/stagefright/MediaBufferGroup.h>
#include <media/stagefright/MediaDebug.h>
#include <media/stagefright/MediaDefs.h>
#include <media/stagefright/MediaErrors.h>
#include <media/stagefright/MetaData.h>
#include <media/stagefright/Utils.h>
namespace android {
inline static void ConvertYUV420SemiPlanarToYUV420Planar(
uint8_t *inyuv, uint8_t* outyuv,
int32_t width, int32_t height) {
int32_t outYsize = width * height;
uint32_t *outy = (uint32_t *) outyuv;
uint16_t *outcb = (uint16_t *) (outyuv + outYsize);
uint16_t *outcr = (uint16_t *) (outyuv + outYsize + (outYsize >> 2));
/* Y copying */
memcpy(outy, inyuv, outYsize);
/* U & V copying */
uint32_t *inyuv_4 = (uint32_t *) (inyuv + outYsize);
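/* Each 32-bit load below grabs two interleaved chroma pairs: the even-positioned
   bytes are packed into tempU and the odd-positioned bytes into tempV (two samples
   per 16-bit store), and the pair is then written to the output Cb/Cr planes
   swapped, hence the "Flip U and V" note. */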
for (int32_t i = height >> 1; i > 0; --i) {
for (int32_t j = width >> 2; j > 0; --j) {
uint32_t temp = *inyuv_4++;
uint32_t tempU = temp & 0xFF;
tempU = tempU | ((temp >> 8) & 0xFF00);
uint32_t tempV = (temp >> 8) & 0xFF;
tempV = tempV | ((temp >> 16) & 0xFF00);
// Flip U and V
*outcb++ = tempV;
*outcr++ = tempU;
}
}
}
M4vH263Encoder::M4vH263Encoder(
const sp<MediaSource>& source,
const sp<MetaData>& meta)
: mSource(source),
mMeta(meta),
mNumInputFrames(-1),
mNextModTimeUs(0),
mStarted(false),
mInputBuffer(NULL),
mInputFrameData(NULL),
mGroup(NULL) {
LOGV("Construct software M4vH263Encoder");
mHandle = new tagvideoEncControls;
memset(mHandle, 0, sizeof(tagvideoEncControls));
mInitCheck = initCheck(meta);
}
M4vH263Encoder::~M4vH263Encoder() {
LOGV("Destruct software M4vH263Encoder");
if (mStarted) {
stop();
}
delete mEncParams;
delete mHandle;
}
status_t M4vH263Encoder::initCheck(const sp<MetaData>& meta) {
LOGV("initCheck");
CHECK(meta->findInt32(kKeyWidth, &mVideoWidth));
CHECK(meta->findInt32(kKeyHeight, &mVideoHeight));
CHECK(meta->findInt32(kKeySampleRate, &mVideoFrameRate));
CHECK(meta->findInt32(kKeyBitRate, &mVideoBitRate));
// XXX: Add more color format support
CHECK(meta->findInt32(kKeyColorFormat, &mVideoColorFormat));
if (mVideoColorFormat != OMX_COLOR_FormatYUV420Planar) {
if (mVideoColorFormat != OMX_COLOR_FormatYUV420SemiPlanar) {
LOGE("Color format %d is not supported", mVideoColorFormat);
return BAD_VALUE;
}
// Allocate spare buffer only when color conversion is needed.
// Assume the color format is OMX_COLOR_FormatYUV420SemiPlanar.
mInputFrameData =
(uint8_t *) malloc((mVideoWidth * mVideoHeight * 3 ) >> 1);
CHECK(mInputFrameData);
}
// XXX: Remove this restriction
if (mVideoWidth % 16 != 0 || mVideoHeight % 16 != 0) {
LOGE("Video frame size %dx%d must be a multiple of 16",
mVideoWidth, mVideoHeight);
return BAD_VALUE;
}
mEncParams = new tagvideoEncOptions;
memset(mEncParams, 0, sizeof(tagvideoEncOptions));
if (!PVGetDefaultEncOption(mEncParams, 0)) {
LOGE("Failed to get default encoding parameters");
return BAD_VALUE;
}
// Need to know which role the encoder is in.
// XXX: Set the proper mode for other types of applications
// like streaming or video conference
const char *mime;
CHECK(meta->findCString(kKeyMIMEType, &mime));
CHECK(!strcmp(mime, MEDIA_MIMETYPE_VIDEO_MPEG4) ||
!strcmp(mime, MEDIA_MIMETYPE_VIDEO_H263));
if (!strcmp(mime, MEDIA_MIMETYPE_VIDEO_MPEG4)) {
mEncParams->encMode = COMBINE_MODE_WITH_ERR_RES;
} else {
mEncParams->encMode = H263_MODE;
}
mEncParams->encWidth[0] = mVideoWidth;
mEncParams->encHeight[0] = mVideoHeight;
mEncParams->encFrameRate[0] = mVideoFrameRate;
mEncParams->rcType = VBR_1;
mEncParams->vbvDelay = (float)5.0;
// Set profile and level
// If profile and level setting is not correct, failure
// is reported when the encoder is initialized.
mEncParams->profile_level = CORE_PROFILE_LEVEL2;
int32_t profileLevel;
if (meta->findInt32(kKeyVideoLevel, &profileLevel)) {
mEncParams->profile_level = (ProfileLevelType)profileLevel;
}
mEncParams->packetSize = 32;
mEncParams->rvlcEnable = PV_OFF;
mEncParams->numLayers = 1;
mEncParams->timeIncRes = 1000;
mEncParams->tickPerSrc = mEncParams->timeIncRes / mVideoFrameRate;
mEncParams->bitRate[0] = mVideoBitRate;
mEncParams->iQuant[0] = 15;
mEncParams->pQuant[0] = 12;
mEncParams->quantType[0] = 0;
mEncParams->noFrameSkipped = PV_OFF;
// Set IDR frame refresh interval
int32_t iFramesIntervalSec;
CHECK(meta->findInt32(kKeyIFramesInterval, &iFramesIntervalSec));
if (iFramesIntervalSec < 0) {
mEncParams->intraPeriod = -1;
} else if (iFramesIntervalSec == 0) {
mEncParams->intraPeriod = 1; // All I frames
} else {
mEncParams->intraPeriod =
(iFramesIntervalSec * mVideoFrameRate);
}
mEncParams->numIntraMB = 0;
mEncParams->sceneDetect = PV_ON;
mEncParams->searchRange = 16;
mEncParams->mv8x8Enable = PV_OFF;
mEncParams->gobHeaderInterval = 0;
mEncParams->useACPred = PV_ON;
mEncParams->intraDCVlcTh = 0;
mFormat = new MetaData;
mFormat->setInt32(kKeyWidth, mVideoWidth);
mFormat->setInt32(kKeyHeight, mVideoHeight);
mFormat->setInt32(kKeyBitRate, mVideoBitRate);
mFormat->setInt32(kKeySampleRate, mVideoFrameRate);
mFormat->setInt32(kKeyColorFormat, mVideoColorFormat);
mFormat->setCString(kKeyMIMEType, mime);
mFormat->setCString(kKeyDecoderComponent, "M4vH263Encoder");
return OK;
}
status_t M4vH263Encoder::start(MetaData *params) {
LOGV("start");
if (mInitCheck != OK) {
return mInitCheck;
}
if (mStarted) {
LOGW("Call start() when encoder already started");
return OK;
}
if (!PVInitVideoEncoder(mHandle, mEncParams)) {
LOGE("Failed to initialize the encoder");
return UNKNOWN_ERROR;
}
mGroup = new MediaBufferGroup();
int32_t maxSize;
if (!PVGetMaxVideoFrameSize(mHandle, &maxSize)) {
maxSize = 256 * 1024; // Magic #
}
LOGV("Max output buffer size: %d", maxSize);
mGroup->add_buffer(new MediaBuffer(maxSize));
mSource->start(params);
mNumInputFrames = -1; // 1st frame contains codec specific data
mStarted = true;
return OK;
}
status_t M4vH263Encoder::stop() {
LOGV("stop");
if (!mStarted) {
LOGW("Call stop() when encoder has not started");
return OK;
}
if (mInputBuffer) {
mInputBuffer->release();
mInputBuffer = NULL;
}
if (mGroup) {
delete mGroup;
mGroup = NULL;
}
if (mInputFrameData) {
free(mInputFrameData);  // allocated with malloc() in initCheck()
mInputFrameData = NULL;
}
CHECK(PVCleanUpVideoEncoder(mHandle));
mSource->stop();
mStarted = false;
return OK;
}
sp<MetaData> M4vH263Encoder::getFormat() {
LOGV("getFormat");
return mFormat;
}
status_t M4vH263Encoder::read(
MediaBuffer **out, const ReadOptions *options) {
CHECK(!options);
*out = NULL;
MediaBuffer *outputBuffer;
CHECK_EQ(OK, mGroup->acquire_buffer(&outputBuffer));
uint8_t *outPtr = (uint8_t *) outputBuffer->data();
int32_t dataLength = outputBuffer->size();
// Output codec specific data
if (mNumInputFrames < 0) {
if (!PVGetVolHeader(mHandle, outPtr, &dataLength, 0)) {
LOGE("Failed to get VOL header");
return UNKNOWN_ERROR;
}
LOGV("Output VOL header: %d bytes", dataLength);
outputBuffer->meta_data()->setInt32(kKeyIsCodecConfig, 1);
outputBuffer->set_range(0, dataLength);
*out = outputBuffer;
++mNumInputFrames;
return OK;
}
// Ready for accepting an input video frame
if (OK != mSource->read(&mInputBuffer, options)) {
LOGE("Failed to read from data source");
outputBuffer->release();
return UNKNOWN_ERROR;
}
int64_t timeUs;
CHECK(mInputBuffer->meta_data()->findInt64(kKeyTime, &timeUs));
if (mNextModTimeUs > timeUs) {
LOGV("mNextModTimeUs %lld > timeUs %lld", mNextModTimeUs, timeUs);
outputBuffer->set_range(0, 0);
*out = outputBuffer;
mInputBuffer->release();
mInputBuffer = NULL;
return OK;
}
// Color convert to OMX_COLOR_FormatYUV420Planar if necessary
outputBuffer->meta_data()->setInt64(kKeyTime, timeUs);
uint8_t *inPtr = (uint8_t *) mInputBuffer->data();
if (mVideoColorFormat != OMX_COLOR_FormatYUV420Planar) {
CHECK(mInputFrameData);
CHECK(mVideoColorFormat == OMX_COLOR_FormatYUV420SemiPlanar);
ConvertYUV420SemiPlanarToYUV420Planar(
inPtr, mInputFrameData, mVideoWidth, mVideoHeight);
inPtr = mInputFrameData;
}
CHECK(inPtr != NULL);
// Ready for encoding a video frame
VideoEncFrameIO vin, vout;
vin.height = ((mVideoHeight + 15) >> 4) << 4;
vin.pitch = ((mVideoWidth + 15) >> 4) << 4;
vin.timestamp = (timeUs + 500) / 1000; // in ms
vin.yChan = inPtr;
vin.uChan = vin.yChan + vin.height * vin.pitch;
vin.vChan = vin.uChan + ((vin.height * vin.pitch) >> 2);
unsigned long modTimeMs = 0;
int32_t nLayer = 0;
MP4HintTrack hintTrack;
if (!PVEncodeVideoFrame(mHandle, &vin, &vout,
&modTimeMs, outPtr, &dataLength, &nLayer) ||
!PVGetHintTrack(mHandle, &hintTrack)) {
LOGE("Failed to encode frame or get hink track at frame %lld",
mNumInputFrames);
outputBuffer->release();
mInputBuffer->release();
mInputBuffer = NULL;
return UNKNOWN_ERROR;
}
CHECK_EQ(NULL, PVGetOverrunBuffer(mHandle));
if (hintTrack.CodeType == 0) { // I-frame serves as sync frame
outputBuffer->meta_data()->setInt32(kKeyIsSyncFrame, 1);
}
++mNumInputFrames;
mNextModTimeUs = modTimeMs * 1000LL;
outputBuffer->set_range(0, dataLength);
*out = outputBuffer;
mInputBuffer->release();
mInputBuffer = NULL;
return OK;
}
void M4vH263Encoder::signalBufferReturned(MediaBuffer *buffer) {
}
} // namespace android
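For orientation, this is roughly how the new encoder is driven on its own, mirroring what initCheck(), start(), and read() above expect. The MediaSource named source, the 176x144 geometry, and the other numeric values are illustrative assumptions, not part of this change:

    // Sketch only: the keys are the ones M4vH263Encoder::initCheck() reads.
    sp<MetaData> encMeta = new MetaData;
    encMeta->setCString(kKeyMIMEType, MEDIA_MIMETYPE_VIDEO_MPEG4);  // or MEDIA_MIMETYPE_VIDEO_H263
    encMeta->setInt32(kKeyWidth, 176);                  // must be a multiple of 16
    encMeta->setInt32(kKeyHeight, 144);                 // must be a multiple of 16
    encMeta->setInt32(kKeySampleRate, 15);              // frame rate (reuses the sample-rate key)
    encMeta->setInt32(kKeyBitRate, 192000);
    encMeta->setInt32(kKeyColorFormat, OMX_COLOR_FormatYUV420SemiPlanar);
    encMeta->setInt32(kKeyIFramesInterval, 1);          // seconds between I-frames

    sp<MediaSource> encoder = new M4vH263Encoder(source, encMeta);
    CHECK_EQ(OK, encoder->start());

    MediaBuffer *buffer;
    while (encoder->read(&buffer) == OK) {
        // The very first buffer carries the VOL header (kKeyIsCodecConfig is set);
        // later buffers carry encoded frames, with kKeyIsSyncFrame set on I-frames.
        buffer->release();
        buffer = NULL;
    }
    encoder->stop();

In the normal camcorder path this wiring is done by OMXCodec via the kEncoderInfo entry added above rather than by instantiating M4vH263Encoder directly.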

View File

@@ -0,0 +1,437 @@
/* ------------------------------------------------------------------
* Copyright (C) 1998-2009 PacketVideo
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied.
* See the License for the specific language governing permissions
* and limitations under the License.
* -------------------------------------------------------------------
*/
/*********************************************************************************/
/* File: cvei.h */
/* Purpose: */
/* Date: */
/* Revision History: */
/** @introduction Common Video Encoder Interface (CVEI) is intended to be used by
application developers who want to create a multimedia application with video
encoding feature. CVEI is designed such that new video encoder algorithms or
modules can be plugged in seamlessly without user interaction. In other words,
any changes to the CVEI library are transparent to the users. Users can still
use the same set of APIs for new encoding tools.
@requirement CVEI will take an input frame in one of several formats supported
by PV and encode it to an MPEG4 bitstream. It will also return a reconstructed
image in YUV 4:2:0 format. Currently the input formats supported are YUV 4:2:0,
RGB24 and UYVY 4:2:2.
CVEI is designed to be simple to use. It should hide implementation
dependencies from the users. In this version, we decided that the operation will
be synchronous, i.e., the encoding will be a blocking call. Asynchronous operation
will live in the level above CVEI, i.e., in the Author Engine Video Module, which will
take care of the capture device as well.
@brief The following classes are used to interface with codecs. Their names
are CPVxxxVideoEncoder where xxx is codec specific such as MPEG4, H263, H26L,
etc. All of them are subclasses of CPVCommonVideoEncoder.
*/
/*********************************************************************************/
#ifndef __CVEI_H
#define __CVEI_H
#include "oscl_scheduler_ao.h"
#include "oscl_base.h"
#include "mp4enc_api.h" /* for MP4HintTrack */
#define MAX_LAYER 2
/** General returned values. */
enum TCVEI_RETVAL
{
ECVEI_SUCCESS,
ECVEI_FAIL,
ECVEI_FLUSH,
ECVEI_MORE_OUTPUT
} ;
/** Returned events with the callback function. */
enum TCVEI_EVENT
{
/** Called when a packet or a frame of output bitstream is ready. */
ECVEI_BUFFER_READY,
/** Called when the last packet of a frame of output bitstream is ready. */
ECVEI_FRAME_DONE,
/** Called when no buffer is available for the output bitstream. A buffer can be added through the AddBuffer API. */
ECVEI_NO_BUFFERS,
/** Called when there is an error with the encoding operation. */
ECVEI_ERROR
};
/** Contains supported input format */
enum TPVVideoFormat
{
ECVEI_RGB24,
ECVEI_RGB12,
ECVEI_YUV420,
ECVEI_UYVY,
ECVEI_YUV420SEMIPLANAR
};
/** Type of contents for optimal encoding mode. */
enum TPVContentType
{
/** Content is to be streamed in real-time. */
ECVEI_STREAMING,
/** Content is to be downloaded and played back later.*/
ECVEI_DOWNLOAD,
/** Content is to be 3gpp baseline compliant. */
ECVEI_H263
};
/** Rate control type. */
enum TMP4RateControlType
{
/** Constant quality, variable bit rate, fixed quantization level. */
ECONSTANT_Q,
/** Short-term constant bit rate control. */
ECBR_1,
/** Long-term constant bit rate control. */
EVBR_1
};
/** Targeted profile and level to encode. */
enum TPVM4VProfileLevel
{
/* Non-scalable profile */
ECVEI_SIMPLE_LEVEL0 = 0,
ECVEI_SIMPLE_LEVEL1,
ECVEI_SIMPLE_LEVEL2,
ECVEI_SIMPLE_LEVEL3,
ECVEI_CORE_LEVEL1,
ECVEI_CORE_LEVEL2,
/* Scalable profile */
ECVEI_SIMPLE_SCALABLE_LEVEL0 = 6,
ECVEI_SIMPLE_SCALABLE_LEVEL1,
ECVEI_SIMPLE_SCALABLE_LEVEL2,
ECVEI_CORE_SCALABLE_LEVEL1 = 10,
ECVEI_CORE_SCALABLE_LEVEL2,
ECVEI_CORE_SCALABLE_LEVEL3
};
/** This structure contains encoder settings. */
struct TPVVideoEncodeParam
{
/** Specifies an ID that will be used to specify this encoder while returning
the bitstream in asynchronous mode. */
uint32 iEncodeID;
/** Specifies whether base only (iNumLayer = 1) or base + enhancement layer
(iNumLayer =2 ) is to be used. */
int32 iNumLayer;
/** Specifies the width in pixels of the encoded frames. iFrameWidth[0] is for
the base layer and iFrameWidth[1] is for the enhanced layer. */
int iFrameWidth[MAX_LAYER];
/** Specifies the height in pixels of the encoded frames. iFrameHeight[0] is for
the base layer and iFrameHeight[1] is for the enhanced layer. */
int iFrameHeight[MAX_LAYER];
/** Specifies the cumulative bit rate in bits per second. iBitRate[0] is for the base
layer and iBitRate[1] is for base+enhanced layer.*/
int iBitRate[MAX_LAYER];
/** Specifies the cumulative frame rate in frames per second. iFrameRate[0] is for the
base layer and iFrameRate[1] is for base+enhanced layer. */
float iFrameRate[MAX_LAYER];
/** Specifies the picture quality factor on a scale of 1 to 10. It trades the
picture quality off against the frame rate: higher frame quality means a lower
frame rate, and lower frame quality allows a higher frame rate.*/
int32 iFrameQuality;
/** Enable the use of iFrameQuality to determine the frame rate. If it is false,
the encoder will try to meet the specified frame rate regardless of the frame quality.*/
bool iEnableFrameQuality;
/** Specifies the maximum number of P-frames between 2 INTRA frames. INTRA mode is
forced on a frame once this interval is reached. When only one I-frame is to be present
at the beginning of the clip, iIFrameInterval should be set to -1. */
int32 iIFrameInterval;
/** According to iIFrameInterval setting, the minimum number of intra MB per frame is
optimally calculated for error resiliency. However, when iIFrameInterval is set to -1,
iNumIntraMBRefresh must be specified to guarantee the minimum number of intra
macroblocks per frame.*/
uint32 iNumIntraMBRefresh;
/** Specifies the VBV buffer size, which determines the end-to-end delay between the
encoder and the decoder. The size is in units of seconds. For a download application,
the buffer size can be larger than for a streaming application. For a 2-way application,
this buffer shall be kept minimal. As a special case, in VBR mode, iBufferDelay will
be set to -1 to allow buffer underflow. */
float iBufferDelay;
/** Specifies the type of access, whether it is streaming, ECVEI_STREAMING
(data partitioning mode), or download, ECVEI_DOWNLOAD (combined mode).*/
TPVContentType iContentType;
/** Specifies the rate control algorithm among one of the following constant Q,
CBR and VBR. The structure TMP4RateControlType is defined below.*/
TMP4RateControlType iRateControlType;
/** Specifies high quality but also high complexity mode for rate control. */
bool iRDOptimal;
/** Specifies the initial quantization parameter for the first I-frame. If constant Q
rate control is used, this QP will be used for all the I-frames. This number must be
set between 1 and 31, otherwise, Initialize() will fail. */
int iIquant[2];
/** Specifies the initial quantization parameter for the first P-frame. If constant Q
rate control is used, this QP will be used for all the P-frames. This number must be
set between 1 and 31, otherwise, Initialize() will fail. */
int iPquant[2];
/** Specifies the initial quantization parameter for the first B-frame. If constant Q
rate control is used, this QP will be used for all the B-frames. This number must be
set between 1 and 31, otherwise, Initialize() will fail. */
int iBquant[2];
/** Specifies the search range in pixel unit for motion vector. The range of the
motion vector will be of dimension [-iSearchRange.5, +iSearchRange.0]. */
int32 iSearchRange;
/** Specifies the use of 8x8 motion vectors. */
bool iMV8x8;
/** Specifies the use of half-pel motion vectors. */
bool iMVHalfPel;
/** Specifies automatic scene detection, where an I-frame will be used for the first frame
in a new scene. */
bool iSceneDetection;
/** Specifies the packet size in bytes which represents the number of bytes between two resync markers.
For ECVEI_DOWNLOAD and ECVEI_H263, if iPacketSize is set to 0, there will be no resync markers in the bitstream.
For ECVEI_STREAMING this parameter must be set to a value greater than 0.*/
uint32 iPacketSize;
/** Specifies whether the current frame skipping decision is allowed after encoding
the current frame. If there is no memory of what has been coded for the current frame,
iNoCurrentSkip has to be on. */
bool iNoCurrentSkip;
/** Specifies that no frame skipping is allowed. Frame skipping is a tool used to
control the average number of bits spent to meet the target bit rate. */
bool iNoFrameSkip;
/** Specifies the duration of the clip in millisecond.*/
int32 iClipDuration;
/** Specifies the profile and level used to encode the bitstream. When present,
other settings will be checked against the range allowable by this target profile
and level. Fail may be returned from the Initialize call. */
TPVM4VProfileLevel iProfileLevel;
/** Specifies FSI Buffer input */
uint8* iFSIBuff;
/** Specifies FSI Buffer Length */
int iFSIBuffLength;
};
/** Structure for input format information */
struct TPVVideoInputFormat
{
/** Contains the width in pixels of the input frame. */
int32 iFrameWidth;
/** Contains the height in pixels of the input frame. */
int32 iFrameHeight;
/** Contains the input frame rate in the unit of frame per second. */
float iFrameRate;
/** Contains Frame Orientation. Used for RGB input. 1 means Bottom_UP RGB, 0 means Top_Down RGB, -1 for video formats other than RGB*/
int iFrameOrientation;
/** Contains the format of the input video, e.g., YUV 4:2:0, UYVY, RGB24, etc. */
TPVVideoFormat iVideoFormat;
};
/** Contains the input data information */
struct TPVVideoInputData
{
/** Pointer to an input frame buffer in input source format.*/
uint8 *iSource;
/** The corresponding time stamp of the input frame. */
uint32 iTimeStamp;
};
/** Contains the output data information */
struct TPVVideoOutputData
{
/** Pointer to the reconstructed frame buffer in YUV 4:2:0 domain. */
uint8 *iFrame;
/** The number of layer encoded, 0 for base, 1 for enhanced. */
int32 iLayerNumber;
/** Pointer to the encoded bitstream buffer. */
uint8 *iBitStream;
/** The size in bytes of iBitStream. */
int32 iBitStreamSize;
/** The time stamp of the encoded frame according to the bitstream. */
uint32 iVideoTimeStamp;
/** The time stamp of the encoded frame as given before the encoding. */
uint32 iExternalTimeStamp;
/** The hint track information. */
MP4HintTrack iHintTrack;
};
/** An observer class for callbacks to report the status of the CVEI */
class MPVCVEIObserver
{
public:
/** The callback function with aEvent being one of the TCVEI_EVENT enumeration values. */
virtual void HandlePVCVEIEvent
(uint32 aId, uint32 aEvent, uint32 aParam1 = 0) = 0;
virtual ~MPVCVEIObserver() {}
};
/** This class is the base class for codec specific interface class.
The users must maintain an instance of the codec specific class throughout
the encoding session.
*/
class CommonVideoEncoder : public OsclTimerObject
{
public:
/** Constructor for CVEI class. */
CommonVideoEncoder() : OsclTimerObject(OsclActiveObject::EPriorityNominal, "PVEncoder") {};
/** Initialization function to set the input video format and the
encoding parameters. This function returns ECVEI_ERROR if there are
any errors. Otherwise, the function returns ECVEI_SUCCESS.*/
virtual TCVEI_RETVAL Initialize(TPVVideoInputFormat *aVidInFormat, TPVVideoEncodeParam *aEncParam) = 0;
/** Set the observer for asynchronous encoding mode. */
virtual TCVEI_RETVAL SetObserver(MPVCVEIObserver *aObserver) = 0;
/** Add a buffer to the queue of output buffers for output bitstream in
asynchronous encoding mode. */
virtual TCVEI_RETVAL AddBuffer(TPVVideoOutputData *aVidOut) = 0;
/** This function sends in an input video data structure containing a source
frame and the associated timestamp. The encoded bitstream will be returned by
observer callback.
The above 3 APIs only replace EncodeFrame() API. Other APIs such as initialization
and update parameters remain the same. */
virtual TCVEI_RETVAL Encode(TPVVideoInputData *aVidIn) = 0;
/** This function returns the maximum VBV buffer size such that the
application can allocate a buffer that guarantees to fit one frame.*/
virtual int32 GetBufferSize() = 0;
/** This function returns the VOL header part (starting from the VOS header)
of the encoded bitstream. This function must be called after Initialize.
The output is written to the memory (volHeader) allocated by the users.*/
virtual TCVEI_RETVAL GetVolHeader(uint8 *volHeader, int32 *size, int32 layer) = 0;
/** This function sends in an input video data structure containing a source
frame and the associated timestamp. It returns an output video data structure
containing coded bit stream, reconstructed frame in YUV 4:2:0 (can be changed
to source format) and the timestamp associated with the coded frame.
The input timestamp may not correspond to the output timestamp. User can send
an input structure in without getting any encoded data back or getting an encoded
frame in the past. This function returns ECVEI_ERROR if there are any errors.
Otherwise, the function returns ECVEI_SUCCESS.
In case of overrun buffer usage, it is possible that the return value is ECVEI_MORE_OUTPUT,
which indicates that the frame cannot fit in the current buffer.*/
virtual TCVEI_RETVAL EncodeFrame(TPVVideoInputData *aVidIn, TPVVideoOutputData *aVidOut, int *aRemainingBytes
#ifdef PVAUTHOR_PROFILING
, void *aParam1 = 0
#endif
) = 0;
/** Before the termination of the encoding process, the users have to query
whether there are any encoded frames pending inside the CVEI. The returned value
will indicate whether there are more frames to be flushed (ECVEI_FLUSH).
FlushOutput has to be called until there are no more frames, i.e., it returns
ECVEI_SUCCESS. This function may be called during the encoding operation if
there is no input frame and the application does not want to waste the time
waiting for an input frame. It can call this function to flush encoded frames
out of memory. */
virtual TCVEI_RETVAL FlushOutput(TPVVideoOutputData *aVidOut) = 0;
/** This function cleans up the resources allocated by the CVEI. */
virtual TCVEI_RETVAL Terminate() = 0;
/**This function dynamically changes the target bit rate of the encoder
while encoding. aBitRate[n] is the new cumulative target bit rate of layer n.
A successful update is indicated by ECVEI_SUCCESS.*/
virtual TCVEI_RETVAL UpdateBitRate(int32 aNumLayer, int32 *aBitRate) = 0;
/** This function dynamically changes the target frame rate of the encoder
while encoding. aFrameRate[n] is the new cumulative target frame rate of
layer n. A successful update is indicated by ECVEI_SUCCESS. */
virtual TCVEI_RETVAL UpdateFrameRate(int32 aNumLayer, float *aFrameRate) = 0;
/** This function dynamically changes the I-Vop update interval while
encoding to a new value, aIFrameInterval. */
virtual TCVEI_RETVAL UpdateIFrameInterval(int32 aIFrameInterval) = 0;
/** This function forces an I-Vop mode to the next frame to be encoded. */
virtual TCVEI_RETVAL IFrameRequest() = 0;
/** This function returns the input width of a specific layer
(not necessarily multiple of 16). */
virtual int32 GetEncodeWidth(int32 aLayer) = 0;
/** This function returns the input height of a specific layer
(not necessarily multiple of 16). */
virtual int32 GetEncodeHeight(int32 aLayer) = 0;
/** This function returns the target encoded frame rate of a specific layer. */
virtual float GetEncodeFrameRate(int32 aLayer) = 0;
protected:
virtual void Run(void) = 0;
virtual void DoCancel(void) = 0;
/* internal enum */
enum TCVEIState
{
EIdle,
EEncode
};
TCVEIState iState;
uint32 iId;
};
#endif
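cvei.h only declares the abstract interface; concrete classes follow the CPVxxxVideoEncoder naming convention described in the introduction and are not part of this check-in. A synchronous-use sketch, where the class name CPVM4VEncoder and all parameter values are assumptions for illustration only:

    // Hypothetical synchronous use of the CVEI interface (per the comments above).
    TPVVideoInputFormat inFormat;
    inFormat.iFrameWidth = 176;
    inFormat.iFrameHeight = 144;
    inFormat.iFrameRate = 15.0f;
    inFormat.iFrameOrientation = -1;        // not an RGB source
    inFormat.iVideoFormat = ECVEI_YUV420;

    TPVVideoEncodeParam param;
    param.iNumLayer = 1;
    param.iFrameWidth[0] = 176;
    param.iFrameHeight[0] = 144;
    param.iFrameRate[0] = 15.0f;
    param.iBitRate[0] = 192000;
    param.iContentType = ECVEI_DOWNLOAD;
    param.iRateControlType = EVBR_1;
    // ... remaining fields per the documentation above ...

    CPVM4VEncoder encoder;                  // hypothetical concrete subclass
    if (encoder.Initialize(&inFormat, &param) != ECVEI_SUCCESS) { /* handle failure */ }

    TPVVideoOutputData out;
    out.iBitStream = new uint8[encoder.GetBufferSize()];   // one-frame output buffer

    uint8 volHeader[64];
    int32 volSize = sizeof(volHeader);
    encoder.GetVolHeader(volHeader, &volSize, 0);

    TPVVideoInputData in;                   // per frame: fill in.iSource and in.iTimeStamp
    int remainingBytes = 0;
    encoder.EncodeFrame(&in, &out, &remainingBytes);

    while (encoder.FlushOutput(&out) == ECVEI_FLUSH) { /* drain pending frames */ }
    encoder.Terminate();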

View File

@@ -0,0 +1,454 @@
/* ------------------------------------------------------------------
* Copyright (C) 1998-2009 PacketVideo
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied.
* See the License for the specific language governing permissions
* and limitations under the License.
* -------------------------------------------------------------------
*/
#ifndef _MP4ENC_API_H_
#define _MP4ENC_API_H_
#include <string.h>
#ifndef _PV_TYPES_
#define _PV_TYPES_
typedef unsigned char UChar;
typedef char Char;
typedef unsigned int UInt;
typedef int Int;
typedef unsigned short UShort;
typedef short Short;
typedef unsigned int Bool;
typedef unsigned long ULong;
#define PV_CODEC_INIT 0
#define PV_CODEC_STOP 1
#endif
#define PV_TRUE 1
#define PV_FALSE 0
typedef enum
{
SHORT_HEADER,
SHORT_HEADER_WITH_ERR_RES,
H263_MODE,
H263_MODE_WITH_ERR_RES,
DATA_PARTITIONING_MODE,
COMBINE_MODE_NO_ERR_RES,
COMBINE_MODE_WITH_ERR_RES
} MP4EncodingMode;
typedef enum
{
CONSTANT_Q,
CBR_1,
VBR_1,
CBR_2,
VBR_2,
CBR_LOWDELAY
} MP4RateControlType;
typedef enum
{
PASS1,
PASS2
} PassNum;
typedef enum
{
PV_OFF,
PV_ON
} ParamEncMode;
/* {SPL0, SPL1, SPL2, SPL3, CPL1, CPL2, CPL2, CPL2} , SPL0: Simple Profile@Level0 , CPL1: Core Profile@Level1 */
/* {SSPL0, SSPL1, SSPL2, SSPL2, CSPL1, CSPL2, CSPL3, CSPL3} , SSPL0: Simple Scalable Profile@Level0, CPL1: Core Scalable Profile@Level1 */
typedef enum
{
/* Non-scalable profile */
SIMPLE_PROFILE_LEVEL0 = 0,
SIMPLE_PROFILE_LEVEL1,
SIMPLE_PROFILE_LEVEL2,
SIMPLE_PROFILE_LEVEL3,
CORE_PROFILE_LEVEL1,
CORE_PROFILE_LEVEL2,
/* Scalable profile */
SIMPLE_SCALABLE_PROFILE_LEVEL0 = 6,
SIMPLE_SCALABLE_PROFILE_LEVEL1,
SIMPLE_SCALABLE_PROFILE_LEVEL2,
CORE_SCALABLE_PROFILE_LEVEL1 = 10,
CORE_SCALABLE_PROFILE_LEVEL2,
CORE_SCALABLE_PROFILE_LEVEL3
} ProfileLevelType;
typedef struct tagMP4HintTrack
{
UChar MTB;
UChar LayerID;
UChar CodeType;
UChar RefSelCode;
} MP4HintTrack;
typedef struct tagvideoEncControls
{
void *videoEncoderData;
Int videoEncoderInit;
} VideoEncControls;
typedef struct tagvideoEncFrameIO
{
UChar *yChan; /* pointer to Y */
UChar *uChan; /* pointer to U */
UChar *vChan; /* pointer to V */
Int height; /* height for Y */
Int pitch; /* stride for Y */
ULong timestamp; /* modulo timestamp in millisecond*/
} VideoEncFrameIO ;
/**
@brief Encoding options structure */
typedef struct tagvideoEncOptions
{
/** @brief Sets the encoding mode, defined by the above enumeration. If there are conflicts between the encoding mode
* and subsequent encoding options, the encoding mode takes precedence over the encoding options. */
MP4EncodingMode encMode;
/** @brief Sets the number of bytes per packet, only used in DATA_PARTITIONING_MODE or COMBINE_MODE_WITH_ERR_RES mode.
* A resync marker will be inserted approximately every packetSize bytes.*/
Int packetSize;
/** @brief Selects MPEG-4/H.263 profile and level; if specified, other encoding options must conform to it. */
ProfileLevelType profile_level;
/** @brief Enables reversible variable length code (RVLC) mode. Normally it is set to PV_OFF.*/
ParamEncMode rvlcEnable;
/** @brief Set the frequency of GOB header interval */
Int gobHeaderInterval;
/** @brief Sets the number of bitstream layers: 1 is base only; 2 is base + enhancement */
Int numLayers;
/** @brief Sets the number of ticks per second used for timing information encoded in MPEG4 bitstream.*/
Int timeIncRes;
/** @brief Sets the number of ticks in time increment resolution between 2 source frames (equivalent to source frame rate). */
Int tickPerSrc;
/** @brief Specifies encoded heights in pixels, height[n] represents the n-th layer's height. */
Int encHeight[2];
/** @brief Specifies encoded widths in pixels, width[n] represents the n-th layer's width.*/
Int encWidth[2];
/** @brief Specifies target frame rates in frames per second, frameRate[n] represents the n-th layer's target frame rate.*/
float encFrameRate[2];
/** @brief Specifies target bit rates in bits per second unit, bitRate[n] represents the n-th layer's target bit rate. */
Int bitRate[2];
/** @brief Specifies default quantization parameters for I-Vop. Iquant[n] represents the n-th layer default quantization parameter. The default is Iquant[0]=12.*/
Int iQuant[2];
/** @brief Specifies default quantization parameters for P-Vop. Pquant[n] represents the n-th layer default quantization parameter. The default is Pquant[0]=10.*/
Int pQuant[2];
/** @brief Specifies the quantization mode (H263 mode or MPEG mode) of the encoded base and enhancement layer (if any).
* In Simple and Simple Scalable profile, we use only H263 mode.*/
Int quantType[2];
/** @brief Sets rate control algorithm, one of (CONSTANT_Q, CBR_1, or VBR_1).
* CONSTANT_Q uses the default quantization values to encode the sequence.
* CBR_1 (constant bit rate) controls the output at a desired bit rate
* VBR_1 (variable bit rate) gives better picture quality at the expense of bit rate fluctuation
* Note: type=CONSTANT_Q produces sequences with arbitrary bit rate.
* type=CBR_1 produces sequences suitable for streaming.
* type=VBR_1 produces sequences suitable for download. */
MP4RateControlType rcType;
/** @brief Sets the VBV buffer size (in the unit of second delay) used to prevent buffer overflow and underflow
* on the decoder side. This function is redundant to PVSetVBVSize. Either one of them is used at a time. */
float vbvDelay;
/** @brief Specifies whether frame skipping is permitted or not. When rate control type is set to CONSTANT_Q
* frame skipping is automatically banned. In CBR_1 and VBR_1 rate control, frame skipping is allowed by default.
* However, users can force no frame skipping with this flag, but buffer constraint may be violated.*/
ParamEncMode noFrameSkipped;
/** @brief Sets the maximum number of P-frames between two I-frames. I-frame mode is periodically forced
* if no I-frame is encoded after the specified period to add error resiliency and help resynchronize in case of errors.
* Scene change detection can add an additional I-frame if a new scene is detected.
* intraPeriod is the I frame interval in terms of second.
* intraPeriod =0 indicates I-frame encoding only;
* intraPeriod = -1 indicates I-frame followed by all P-frames; (default)
* intraPeriod = N, indicates the number of P-frames between 2 I-frames.*/
Int intraPeriod;
/** @brief Specifies the number of intra MBs to be refreshed in a P-frame. */
Int numIntraMB;
/**
* @brief Specifies whether the scene change detection (SCD) is enabled or disabled.
* With SCD enabled, when a new scene is detected, I-Vop mode will be used for the first frame of
* the new scene resulting in better picture quality. An insertion of an I-VOP resets the intraPeriod
* specified by the IntraPeriodAPI().*/
ParamEncMode sceneDetect;
/** @brief Specifies the search range of motion estimation search. Larger value implies
* larger search range, better motion vector match, but more complexity.
* If searchRange=n, the motion vector search is in the range of [-n,n-1] pixels.
* If half-pel mode is on, the range is [-n, (n-1)+1/2] pixels. The default value is 16.*/
Int searchRange;
/** @brief Turns on/off 8x8 block motion estimation and compensation.
* If on, four motion vectors may be used for motion estimation and compensation of a macroblock,
* otherwise one motion vector per macroblock is used. When the 8x8 MV is off, the total encoding complexity
* is less but the image quality is also worse. Therefore, it can be used in complexity limited environment.*/
ParamEncMode mv8x8Enable;
/** @brief Set the threshold for using intra DC VLC.
* Value must range from 0-7.*/
Int intraDCVlcTh;
/** @brief This flag turns on the use of AC prediction */
Bool useACPred;
} VideoEncOptions;
#ifdef __cplusplus
extern "C"
{
#endif
/* API's */
/* Always start with this one !!*/
/**
* @brief Gets default encoding options. This way users only have to set the relevant encoding options and leave the ones
* they are unsure of at their default values.
* @encOption Pointer to VideoEncOption structure.
* @encUseCase This value determines the set of default encoding options, for example, different encoding options
* are assigned to streaming use-case as compared to download use-case. It can be project dependent too.
* @return true for correct operation; false if error happens
*/
OSCL_IMPORT_REF Bool PVGetDefaultEncOption(VideoEncOptions *encOption, Int encUseCase);
/**
* @brief Verifies the consistency of the encoding parameters, allocates the memory needed, and sets the necessary internal variables.
* @param encCtrl is video encoder control structure that is always passed as input in all APIs
* @return true for correct operation; false if error happens
*/
OSCL_IMPORT_REF Bool PVInitVideoEncoder(VideoEncControls *encCtrl, VideoEncOptions *encOption);
/* acquiring encoder info APIs */
/**
* @brief This function returns VOL header. It has to be called before the frame is encoded. If so,
* then the VOL Header is passed back to the application. Then all frames that are encoded do not contain the VOL Header.
* If you do not call the API then the VOL Header is passed within the first frame that is encoded.
* The behavior is unknown if it is called after the first frame is encoded. It is mainly used for MP4 file format authoring.
* @param encCtrl is video encoder control structure that is always passed as input in all APIs.
* @param volHeader is the Buffer for VOL header.
* @param size is the size of VOL header in bytes.
* @param layer is the layer of the requested VOL header.
* @return true for correct operation; false if error happens.
*/
OSCL_IMPORT_REF Bool PVGetVolHeader(VideoEncControls *encCtrl, UChar *volHeader, Int *size, Int layer);
/**
* @brief This function returns the profile and level in H.263 coding when the encoding parameters are set
* @param encCtrl is video encoder control structure that is always passed as input in all APIs.
* @param profileID is the pointer of the profile ID. Right now we only support profile 0
* @param levelID is the pointer of the level ID that could be 10-70.
* @return true for correct operation; false if error happens.
*/
OSCL_IMPORT_REF Bool PVGetH263ProfileLevelID(VideoEncControls *encCtrl, Int *profileID, Int *levelID);
/**
* @brief This function returns the profile and level of MPEG4 when the encoding parameters are set
* @param encCtrl is video encoder control structure that is always passed as input in all APIs.
* @param profile_level is the pointer of the profile enumeration
* @param nLayer is the index of the layer of interest
* @return true for correct operation; false if error happens.
*/
OSCL_IMPORT_REF Bool PVGetMPEG4ProfileLevelID(VideoEncControls *encCtrl, Int *profile_level, Int nLayer);
/**
* @brief This function returns maximum frame size in bytes
* @param encCtrl is video encoder control structure that is always passed as input in all APIs
* @param maxVideoFrameSize is the pointer of the maximum frame size
* @return true for correct operation; false if error happens
*/
OSCL_IMPORT_REF Bool PVGetMaxVideoFrameSize(VideoEncControls *encCtrl, Int *maxVideoFrameSize);
#ifndef LIMITED_API
/**
* @brief This function returns the total amount of memory (in bytes) allocated by the encoder library.
* @param encCtrl is video encoder control structure that is always passed as input in all APIs
* @return true for correct operation; false if error happens
*/
OSCL_IMPORT_REF Int PVGetEncMemoryUsage(VideoEncControls *encCtrl);
/**
* @brief This function is used by PVAuthor to get the size of the VBV buffer.
* @param encCtrl is video encoder control structure that is always passed as input in all APIs
* @param VBVSize is the pointer of The size of the VBV buffer in bytes.
* @return true for correct operation; false if error happens
*/
OSCL_IMPORT_REF Bool PVGetVBVSize(VideoEncControls *encCtrl, Int *VBVSize);
#endif
/**
* @brief This function encodes a frame in YUV 4:2:0 format from the vid_in input frame and puts the reconstructed
* frame in vid_out and the MPEG4 bitstream in bstream. The application is required to allocate memory for the
* bitstream buffer. The size of the input bitstream memory and the returned output buffer are specified in the
* size field. The encoded layer is specified by the nLayer field. If the current frame is not encoded, size=0 and nLayer=-1.
* Note: If the allocated buffer size is too small to fit a bitstream of a frame, then those extra bits will be left out,
* which can cause a syntactic error at the decoder side.
* @param encCtrl is video encoder control structure that is always passed as input in all APIs
* @param vid_in is the pointer to VideoEncFrameIO structure containing the YUV input data
* @param vid_out is the pointer to VideoEncFrameIO structure containing the reconstructed YUV output data after encoding
* @param nextModTime is the timestamp encoder expects from the next input
* @param bstream is the pointer to MPEG4 bitstream buffer
* @param size is the size of bitstream buffer allocated (input) and size of the encoded bitstream (output).
* @param nLayer is the layer of the encoded frame either 0 for base or 1 for enhancement layer. The value -1 indicates skipped frame due to buffer overflow.
* @return true for correct operation; false if error happens
*/
OSCL_IMPORT_REF Bool PVEncodeVideoFrame(VideoEncControls *encCtrl, VideoEncFrameIO *vid_in, VideoEncFrameIO *vid_out,
ULong *nextModTime, UChar *bstream, Int *size, Int *nLayer);
/**
* @brief This function is used to query the overrun buffer. It is used when PVEncodeVideoFrame() returns a size that is
* larger than the input size.
* @param encCtrl is video encoder control structure that is always passed as input in all APIs
* @return Pointer to the overrun buffer. NULL if overrun buffer is not used.
*/
OSCL_IMPORT_REF UChar* PVGetOverrunBuffer(VideoEncControls *encCtrl);
#ifndef NO_SLICE_ENCODE /* This set of APIs is not working. This functionality has been partially
replaced by the introduction of overrun buffer. */
/* slice-based coding */
/**
* @brief This function sets the input YUV frame and timestamp to be encoded by the slice-based encoding function PVEncodeSlice().
* It also returns the memory address the reconstructed frame will be copied to (in advance) and the coded layer number.
* The encoder library processes the timestamp and determines whether this frame is to be encoded or not. If the current frame
* is not encoded, nLayer=-1. For frame-based motion estimation, the motion estimation of the entire frame is also performed
* in this function. For MB-based motion estimation, the motion vector is searched while coding each MB in PVEncodeSlice().
* @param encCtrl is video encoder control structure that is always passed as input in all APIs
* @param vid_in is the pointer to VideoEncFrameIO structure containing the YUV input data
* @param nextModTime is the timestamp encoder expects from the next input if this input is rejected and nLayer is set to -1.
* @param nLayer is the layer of the encoded frame either 0 for base or 1 for enhancement layer. The value -1 indicates skipped frame due to buffer overflow.
* @return true for correct operation; false if error happens
*/
OSCL_IMPORT_REF Bool PVEncodeFrameSet(VideoEncControls *encCtrl, VideoEncFrameIO *vid_in, ULong *nextModTime, Int *nLayer);
/**
* @brief This function encodes a GOB (short header mode) or a packet (data partitioning mode or combined mode with resync marker)
* and output the reconstructed frame and MPEG4 bitstream. The application is required to allocate memory for the bitstream buffer.
* The size of the input bitstream memory and the returned output buffer are specified in the size field. If the buffer size is
* smaller than the requested packet size, user has to call PVEncodeSlice again to get the rest of that pending packet before moving
* on to the next packet. For the combined mode without resync marker, the function returns when the buffer is full.
* The end-of-frame flag indicates the completion of the frame encoding. Next frame must be sent in with PVEncodeFrameSet().
* At the end-of-frame, the next video input address and the next video modulo timestamp will be set.
* @param encCtrl is video encoder control structure that is always passed as input in all APIs
* @param bstream is the pointer to MPEG4 bitstream buffer.
* @param size is the size of bitstream buffer allocated (input) and size of the encoded bitstream (output).
* @param endofFrame is a flag indicating the end-of-frame, '1'. Otherwise, '0'. When PVSetNoCurrentFrameSkip is OFF,
* end-of-frame '-1' indicates current frame bitstream must be disregarded.
* @param vid_out is the pointer to VideoEncFrameIO structure containing the reconstructed YUV output data after encoding
* @param nextModTime is the timestamp encoder expects from the next input
* @return true for correct operation; false if error happens
*/
OSCL_IMPORT_REF Bool PVEncodeSlice(VideoEncControls *encCtrl, UChar *bstream, Int *size,
Int *endofFrame, VideoEncFrameIO *vid_out, ULong *nextModTime);
#endif
/**
* @brief This function returns MP4 file format hint track information.
* @param encCtrl is video encoder control structure that is always passed as input in all APIs
* @param info is the structure for MP4 hint track information
* @return true for correct operation; false if error happens
*/
OSCL_IMPORT_REF Bool PVGetHintTrack(VideoEncControls *encCtrl, MP4HintTrack *info);
#ifndef LIMITED_API
/**
* @brief Updates target frame rates of the encoded base and enhancement layer (if any) while the encoding operation is ongoing.
* @param encCtrl is video encoder control structure that is always passed as input in all APIs
* @param frameRate is the pointers to array of target frame rates in frames per second,
* frameRate[n] represents the n-th layer's target frame rate.
* @return true for correct operation; false if error happens
*/
OSCL_IMPORT_REF Bool PVUpdateEncFrameRate(VideoEncControls *encCtrl, float *frameRate); /* for 2-way */
/**
* @brief Updates target bit rates of the encoded base and enhancement layer (if any) while the encoding operation is ongoing.
* @param encCtrl is video encoder control structure that is always passed as input in all APIs
* @param bitRate is the pointers to array of target bit rates in bits per second unit,
* bitRate[n] represents the n-th layer's target bit rate.
* @return true for correct operation; false if error happens
*/
OSCL_IMPORT_REF Bool PVUpdateBitRate(VideoEncControls *encCtrl, Int *bitRate); /* for 2-way */
/**
* @brief updates the INTRA frame refresh interval while encoding operation is ongoing.
* @param encCtrl is video encoder control structure that is always passed as input in all APIs
* @param aIFramePeriod is a new value of INTRA frame interval in the unit of number of coded frames.
* @return true for correct operation; false if error happens
*/
OSCL_IMPORT_REF Bool PVUpdateIFrameInterval(VideoEncControls *encCtrl, Int aIFramePeriod);/* for 2-way */
/**
* @brief Specifies the number of intra MBs to be refreshed
* @param encCtrl is video encoder control structure that is always passed as input in all APIs
* @param numMB is the number of Intra MBs to be refreshed
* @return true for correct operation; false if error happens
*/
OSCL_IMPORT_REF Bool PVUpdateNumIntraMBRefresh(VideoEncControls *encCtrl, Int numMB); /* for 2-way */
/**
* @brief This function is called whenever users want the next base frame to be encoded as an I-Vop.
* @param encCtrl is video encoder control structure that is always passed as input in all APIs
* @return true for correct operation; false if error happens
*/
OSCL_IMPORT_REF Bool PVIFrameRequest(VideoEncControls *encCtrl); /* for 2-way */
#endif // LIMITED_API
/* finishing encoder */
/**
* @brief This function frees up all the memory allocated by the encoder library.
* @param encCtrl is video encoder control structure that is always passed as input in all APIs
* @return true for correct operation; false if error happens
*/
OSCL_IMPORT_REF Bool PVCleanUpVideoEncoder(VideoEncControls *encCtrl);
#ifdef __cplusplus
}
#endif
#endif /* _MP4ENC_API_H_ */
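Read together, the comments above describe the same call sequence that M4vH263Encoder.cpp follows earlier in this change. A condensed sketch, with the resolution, frame rate, and bit rate as illustrative values only:

    // Sketch of the mp4enc_api call sequence (mirrors M4vH263Encoder.cpp above).
    VideoEncOptions opts;
    memset(&opts, 0, sizeof(opts));
    PVGetDefaultEncOption(&opts, 0);
    opts.encMode = COMBINE_MODE_WITH_ERR_RES;   // H263_MODE for H.263 output
    opts.encWidth[0] = 176;
    opts.encHeight[0] = 144;
    opts.encFrameRate[0] = 15;
    opts.bitRate[0] = 192000;
    opts.rcType = VBR_1;
    opts.timeIncRes = 1000;
    opts.tickPerSrc = opts.timeIncRes / 15;

    VideoEncControls ctrl;
    memset(&ctrl, 0, sizeof(ctrl));
    if (!PVInitVideoEncoder(&ctrl, &opts)) { /* inconsistent parameters */ }

    Int maxSize;
    if (!PVGetMaxVideoFrameSize(&ctrl, &maxSize)) maxSize = 256 * 1024;
    UChar *bstream = (UChar *) malloc(maxSize);  // needs <stdlib.h>

    Int size = maxSize;
    PVGetVolHeader(&ctrl, bstream, &size, 0);    // codec specific data, emitted once up front

    VideoEncFrameIO vin, vout;
    ULong nextModTime = 0;
    Int nLayer = 0;
    MP4HintTrack hint;
    // Per input frame: fill vin.yChan/uChan/vChan, vin.pitch, vin.height, vin.timestamp, then:
    size = maxSize;
    PVEncodeVideoFrame(&ctrl, &vin, &vout, &nextModTime, bstream, &size, &nLayer);
    PVGetHintTrack(&ctrl, &hint);                // hint.CodeType == 0 marks an I-frame

    PVCleanUpVideoEncoder(&ctrl);
    free(bstream);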

View File

@@ -0,0 +1,859 @@
/* ------------------------------------------------------------------
* Copyright (C) 1998-2009 PacketVideo
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied.
* See the License for the specific language governing permissions
* and limitations under the License.
* -------------------------------------------------------------------
*/
/* Date: 8/02/04 */
/* Description: */
/* Change the bitstream parsing algorithm. Use temporary word of 2 or 4 bytes */
/* before writing it to the bitstream buffer. */
/* Note byteCount doesn't have to be multiple of 2 or 4 */
/*********************************************************************************/
#include "bitstream_io.h"
#include "m4venc_oscl.h"
#include <stdlib.h>
static const UChar Mask[ ] =
{
0x00, 0x01, 0x03, 0x07, 0x0F, 0x1F, 0x3F, 0x7F, 0xFF
};
#define WORD_SIZE 4 /* for 32-bit machine */
/*Note:
1. There is a problem when outputting the last bits (which cannot form a byte yet),
so when you output, you need to stuff bits to make sure it ends on a byte boundary.
2. A byte is currently hard-coded to be 8 bits.*/
/* ======================================================================== */
/* Function : BitStreamCreateEnc(Int bufferSize ) */
/* Date : 08/29/2000 */
/* Purpose : Create a bitstream to hold one encoded video packet or frame */
/* In/out : */
/* bufferSize : size of the bitstream buffer in bytes */
/* Return : Pointer to the BitstreamEncVideo */
/* Modified : */
/* ======================================================================== */
BitstreamEncVideo *BitStreamCreateEnc(Int bufferSize)
{
BitstreamEncVideo *stream;
stream = (BitstreamEncVideo *) M4VENC_MALLOC(sizeof(BitstreamEncVideo));
if (stream == NULL)
{
return NULL;
}
stream->bufferSize = bufferSize;
stream->bitstreamBuffer = (UChar *) M4VENC_MALLOC(stream->bufferSize * sizeof(UChar));
if (stream->bitstreamBuffer == NULL)
{
M4VENC_FREE(stream);
stream = NULL;
return NULL;
}
M4VENC_MEMSET(stream->bitstreamBuffer, 0, stream->bufferSize*sizeof(UChar));
stream->word = 0;
#if WORD_SIZE==4
stream->bitLeft = 32;
#else
stream->bitLeft = 16;
#endif
stream->byteCount = 0;
stream->overrunBuffer = NULL;
stream->oBSize = 0;
return stream;
}
/* ======================================================================== */
/* Function : BitstreamCloseEnc( ) */
/* Date : 08/29/2000 */
/* Purpose : close a bitstream */
/* In/out :
stream : the bitstream to be closed */
/* Return : */
/* Modified : */
/* ======================================================================== */
Void BitstreamCloseEnc(BitstreamEncVideo *stream)
{
if (stream)
{
if (stream->bitstreamBuffer)
{
M4VENC_FREE(stream->bitstreamBuffer);
}
M4VENC_FREE(stream);
}
}
/* ======================================================================== */
/* Function : BitstreamPutBits(BitstreamEncVideo *stream, Int Length,
Int Value) */
/* Date : 08/29/2000 */
/* Purpose : put Length (1-16) number of bits to the stream */
/* for a 32-bit machine this function can take up to 32-bit input */
/* In/out : */
/* stream the bitstream where the bits are put in */
/* Length bit length (should be in the range 1 to 16) */
/* Value those bits value */
/* Return : PV_STATUS */
/* Modified : */
/* ======================================================================== */
PV_STATUS BitstreamPutBits(BitstreamEncVideo *stream, Int Length, UInt Value)
{
PV_STATUS status;
if (stream->bitLeft > Length)
{
stream->word <<= Length;
stream->word |= Value; /* assuming Value fits within Length bits */
stream->bitLeft -= Length;
return PV_SUCCESS;
}
else
{
stream->word <<= stream->bitLeft;
Length -= stream->bitLeft;
stream->word |= ((UInt)Value >> Length);
status = BitstreamSaveWord(stream);
if (status != PV_SUCCESS)
{
return status;
}
/* we got new Length and Value */
/* note that Value is not "clean" because of msb are not masked out */
stream->word = Value;
stream->bitLeft -= Length;
/* assuming that Length is no more than 16 bits */
/* stream->bitLeft should be greater than zero at this point */
//if(stream->bitLeft<=0)
// exit(-1);
return PV_SUCCESS;
}
}
/* ======================================================================== */
/* Function : BitstreamPutGT16Bits(BitstreamEncVideo *stream, Int Length, UInt32 Value) */
/* Date : 08/29/2000 */
/* Purpose : Use this function to put Length (17-32) bits into the stream */
/* (needed on a 16-bit machine). */
/* In/out : */
/* stream the bitstream where the bits are put in */
/* Length bit length (should be in the range 17 to 32) */
/* Value those bits value */
/* Return : PV_STATUS */
/* Modified : */
/* ======================================================================== */
PV_STATUS BitstreamPutGT16Bits(BitstreamEncVideo *stream, Int Length, ULong Value)
{
PV_STATUS status;
UInt topValue;
Int topLength;
topValue = (Value >> 16);
topLength = Length - 16;
if (topLength > 0)
{
status = BitstreamPutBits(stream, topLength, topValue);
if (status != PV_SUCCESS)
{
return status;
}
status = BitstreamPutBits(stream, 16, (UInt)(Value & 0xFFFF));
return status;
}
else
{
status = BitstreamPutBits(stream, Length, (UInt)Value);
return status;
}
}
/* ======================================================================== */
/* Function : BitstreamSaveWord */
/* Date : 08/03/2004 */
/* Purpose : save written word into the bitstream buffer. */
/* In/out : */
/* stream the bitstream where the bits are put in */
/* Return : PV_STATUS */
/* Modified : */
/* ======================================================================== */
PV_STATUS BitstreamSaveWord(BitstreamEncVideo *stream)
{
UChar *ptr;
UInt word;
/* assume that stream->bitLeft is always zero when this function is called */
if (stream->byteCount + WORD_SIZE > stream->bufferSize)
{
if (PV_SUCCESS != BitstreamUseOverrunBuffer(stream, WORD_SIZE))
{
stream->byteCount += WORD_SIZE;
return PV_FAIL;
}
}
ptr = stream->bitstreamBuffer + stream->byteCount;
word = stream->word;
stream->word = 0; /* important to reset to zero */
/* NOTE: byteCount does not have to be multiple of 2 or 4 */
#if (WORD_SIZE == 4)
*ptr++ = word >> 24;
*ptr++ = 0xFF & (word >> 16);
#endif
*ptr++ = 0xFF & (word >> 8);
*ptr = 0xFF & word;
#if (WORD_SIZE == 4)
stream->byteCount += 4;
stream->bitLeft = 32;
#else
stream->byteCount += 2;
stream->bitLeft = 16;
#endif
return PV_SUCCESS;
}
/* ======================================================================== */
/* Function : BitstreamSavePartial */
/* Date : 08/03/2004 */
/* Purpose : save unfinished written word into the bitstream buffer. */
/* In/out : */
/* stream the bitstream where the bits are put in */
/* Return : PV_STATUS */
/* Modified : */
/* ======================================================================== */
PV_STATUS BitstreamSavePartial(BitstreamEncVideo *stream, Int *fraction)
{
UChar *ptr;
UInt word, shift;
Int numbyte, bitleft, bitused;
bitleft = stream->bitLeft;
bitused = (WORD_SIZE << 3) - bitleft; /* number of bits used */
numbyte = bitused >> 3; /* number of byte fully used */
if (stream->byteCount + numbyte > stream->bufferSize)
{
if (PV_SUCCESS != BitstreamUseOverrunBuffer(stream, numbyte))
{
stream->byteCount += numbyte;
return PV_FAIL;
}
}
ptr = stream->bitstreamBuffer + stream->byteCount;
word = stream->word;
word <<= bitleft; /* word is not all consumed */
bitleft = bitused - (numbyte << 3); /* number of bits used (fraction) */
stream->byteCount += numbyte;
if (bitleft)
{
*fraction = 1;
}
else
{
*fraction = 0;
}
bitleft = (WORD_SIZE << 3) - bitleft;
/* save new value */
stream->bitLeft = bitleft;
shift = ((WORD_SIZE - 1) << 3);
while (numbyte)
{
*ptr++ = (UChar)((word >> shift) & 0xFF);
word <<= 8;
numbyte--;
}
    if (*fraction)
    {// writing the last fractional byte here could overrun the buffer when ptr
     // is already out of bounds, so the write is intentionally skipped.
     // *ptr = (UChar)((word>>shift)&0xFF); /* would store the last fractional byte */
    }
/* save new values */
stream->word = word >> bitleft;
    /* note: the fractional byte is kept in stream->word (byteCount counts only */
    /* full bytes), so the encoder can continue calling BitstreamPutBits.       */
return PV_SUCCESS;
}
/* ======================================================================== */
/* Function : BitstreamShortHeaderByteAlignStuffing( */
/* BitstreamEncVideo *stream) */
/* Date : 08/29/2000 */
/* Purpose : bit stuffing for next start code in short video header */
/* In/out : */
/* Return : number of bits to be stuffed */
/* Modified : */
/* ======================================================================== */
Int BitstreamShortHeaderByteAlignStuffing(BitstreamEncVideo *stream)
{
UInt restBits;
Int fraction;
restBits = (stream->bitLeft & 0x7); /* modulo 8 */
if (restBits) /*short_video_header[0] is 1 in h263 baseline*/
{
/* H.263 style stuffing */
BitstreamPutBits(stream, restBits, 0);
}
if (stream->bitLeft != (WORD_SIZE << 3))
{
BitstreamSavePartial(stream, &fraction);
}
return restBits;
}
/* ======================================================================== */
/* Function : BitstreamMpeg4ByteAlignStuffing(BitstreamEncVideo *stream) */
/* Date : 08/29/2000 */
/* Purpose : bit stuffing for next start code in MPEG-4 */
/* In/out : */
/* Return : number of bits to be stuffed */
/* Modified : */
/* ======================================================================== */
Int BitstreamMpeg4ByteAlignStuffing(BitstreamEncVideo *stream)
{
UInt restBits;
Int fraction;
/* Question: in MPEG-4 , short_video_header[0]==0 => even already byte aligned, will still stuff 8 bits
need to check with */
/*if (!(getPointerENC(index1, index2)%8) && short_video_header[0]) return 0;*/
/* need stuffing bits, */
BitstreamPutBits(stream, 1, 0);
restBits = (stream->bitLeft & 0x7); /* modulo 8 */
    if (restBits) /* pad the remaining bit positions with '1's (MPEG-4 stuffing: '0' followed by ones) */
{
/* need stuffing bits, */
BitstreamPutBits(stream, restBits, Mask[restBits]);
}
if (stream->bitLeft != (WORD_SIZE << 3))
{
BitstreamSavePartial(stream, &fraction);
}
return (restBits);
}
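/* ------------------------------------------------------------------------ */
/* Illustrative sketch (not part of the original source): the two alignment */
/* helpers above differ only in the stuffing pattern. Short-header (H.263)  */
/* mode pads with zero bits; MPEG-4 writes a '0' followed by '1's so the    */
/* decoder can strip the stuffing unambiguously. Compiled out.              */
/* ------------------------------------------------------------------------ */
#if 0
static void Example_ByteAlign(BitstreamEncVideo *bs, Int shortVideoHeader)
{
    if (shortVideoHeader)
        BitstreamShortHeaderByteAlignStuffing(bs);  /* pad with 0s to the next byte */
    else
        BitstreamMpeg4ByteAlignStuffing(bs);        /* pad with 0 then 1s           */
}
#endif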
/*does bit stuffing for next resync marker*/
/* does bit stuffing for next resync marker
* "0"
* "01"
* "011"
* "0111"
* "01111"
* "011111"
* "0111111"
* "01111111" (8-bit codeword)
*/
/*Int BitstreamNextResyncMarkerEnc(BitstreamEncVideo *stream)
{
Int count;
BitstreamPut1Bits(stream,0);
count=8-stream->totalBits & 8;
BitstreamPutBits(stream,count,Mask[count]);
return count;
}*/
/* ======================================================================== */
/* Function : BitstreamAppendEnc( BitstreamEncVideo *bitstream1, */
/* BitstreamEncVideo *bitstream2 ) */
/* Date : 08/29/2000 */
/* Purpose : Append the intermediate bitstream (bitstream2) to the end of */
/* output bitstream(bitstream1) */
/* In/out : */
/* Return : */
/* Modified : */
/* ======================================================================== */
PV_STATUS BitstreamAppendEnc(BitstreamEncVideo *bitstream1, BitstreamEncVideo *bitstream2)
{
PV_STATUS status;
UChar *ptrBS2, *ptrBS1;
UChar byteBS2, byteBS1;
Int numbyte2;
Int bitused, bitleft, offset, fraction;
status = BitstreamSavePartial(bitstream1, &fraction);
if (status != PV_SUCCESS)
{
return status;
}
offset = fraction;
status = BitstreamSavePartial(bitstream2, &fraction);
if (status != PV_SUCCESS)
{
return status;
}
if (!offset) /* bitstream1 is byte-aligned */
{
return BitstreamAppendPacket(bitstream1, bitstream2);
}
offset += fraction;
/* since bitstream1 doesn't have to be byte-aligned, we have to process byte by byte */
/* we read one byte from bitstream2 and use BitstreamPutBits to do the job */
if (bitstream1->byteCount + bitstream2->byteCount + offset > bitstream1->bufferSize)
{
if (PV_SUCCESS != BitstreamUseOverrunBuffer(bitstream1, bitstream2->byteCount + offset))
{
bitstream1->byteCount += (bitstream2->byteCount + offset);
return PV_FAIL;
}
}
ptrBS1 = bitstream1->bitstreamBuffer + bitstream1->byteCount; /* move ptr bs1*/
ptrBS2 = bitstream2->bitstreamBuffer;
bitused = (WORD_SIZE << 3) - bitstream1->bitLeft; /* this must be between 1-7 */
bitleft = 8 - bitused;
numbyte2 = bitstream2->byteCount; /* number of byte to copy from bs2 */
bitstream1->byteCount += numbyte2; /* new byteCount */
byteBS1 = ((UChar) bitstream1->word) << bitleft; /* fraction byte from bs1 */
while (numbyte2)
{
byteBS2 = *ptrBS2++;
byteBS1 |= (byteBS2 >> bitused);
*ptrBS1++ = byteBS1;
byteBS1 = byteBS2 << bitleft;
numbyte2--;
}
bitstream1->word = byteBS1 >> bitleft; /* bitstream->bitLeft remains the same */
/* now save bs2->word in bs1 */
status = BitstreamPutBits(bitstream1, (WORD_SIZE << 3) - bitstream2->bitLeft, bitstream2->word);
return status;
}
/* ======================================================================== */
/* Function : BitstreamAppendPacket( BitstreamEncVideo *bitstream1, */
/* BitstreamEncVideo *bitstream2 ) */
/* Date : 05/31/2001 */
/* Purpose : Append the intermediate bitstream (bitstream2) to the end of */
/* output bitstream(bitstream1) knowing that bitstream1 is byte-aligned*/
/* In/out : */
/* Return : */
/* Modified : */
/* ======================================================================== */
PV_STATUS BitstreamAppendPacket(BitstreamEncVideo *bitstream1, BitstreamEncVideo *bitstream2)
{
UChar *ptrBS2, *ptrBS1;
Int numbyte2;
if (bitstream1->byteCount + bitstream2->byteCount > bitstream1->bufferSize)
{
if (PV_SUCCESS != BitstreamUseOverrunBuffer(bitstream1, bitstream2->byteCount))
{
bitstream1->byteCount += bitstream2->byteCount; /* legacy, to keep track of total bytes */
return PV_FAIL;
}
}
ptrBS1 = bitstream1->bitstreamBuffer + bitstream1->byteCount; /* move ptr bs1*/
ptrBS2 = bitstream2->bitstreamBuffer;
numbyte2 = bitstream2->byteCount;
bitstream1->byteCount += numbyte2; /* new byteCount */
/*copy all the bytes in bitstream2*/
M4VENC_MEMCPY(ptrBS1, ptrBS2, sizeof(UChar)*numbyte2);
bitstream1->word = bitstream2->word; /* bitstream1->bitLeft is the same */
bitstream1->bitLeft = bitstream2->bitLeft;
return PV_SUCCESS;
}
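/* ------------------------------------------------------------------------ */
/* Illustrative sketch (not part of the original source): the packet        */
/* assembly pattern used by the frame encoders. The finished packet is      */
/* byte-aligned first, so the destination stream stays byte-aligned and the */
/* whole-byte BitstreamAppendPacket (rather than the bit-shifting           */
/* BitstreamAppendEnc) can be used. Compiled out; error handling omitted.   */
/* ------------------------------------------------------------------------ */
#if 0
static void Example_AppendPacket(BitstreamEncVideo *out, BitstreamEncVideo *packet)
{
    BitstreamMpeg4ByteAlignStuffing(packet);  /* byte-align the finished packet     */
    BitstreamAppendPacket(out, packet);       /* 'out' must already be byte-aligned */
    BitstreamEncReset(packet);                /* reuse the intermediate stream      */
}
#endif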
/* ======================================================================== */
/* Function : BitstreamAppendPacketNoOffset( BitstreamEncVideo *bitstream1,*/
/* BitstreamEncVideo *bitstream2 ) */
/* Date : 04/23/2002 */
/* Purpose : Append the intermediate bitstream (bitstream2) to the end of */
/* output bitstream(bitstream1) , for slice-based coding only */
/* In/out : */
/* Return : */
/* Modified : */
/* ======================================================================== */
PV_STATUS BitstreamAppendPacketNoOffset(BitstreamEncVideo *bitstream1, BitstreamEncVideo *bitstream2)
{
PV_STATUS status = PV_SUCCESS;
UChar *ptrBS2, *ptrBS1;
Int numbyte2;
Int byteleft;
numbyte2 = bitstream2->byteCount;
if (bitstream1->byteCount + bitstream2->byteCount > bitstream1->bufferSize)
{
numbyte2 = bitstream1->bufferSize - bitstream1->byteCount;
status = PV_END_OF_BUF; /* signal end of buffer */
}
ptrBS1 = bitstream1->bitstreamBuffer; /* move ptr bs1*/
ptrBS2 = bitstream2->bitstreamBuffer;
bitstream1->byteCount += numbyte2; /* should be equal to bufferSize */
/*copy all the bytes in bitstream2*/
M4VENC_MEMCPY(ptrBS1, ptrBS2, sizeof(UChar)*numbyte2);
bitstream1->word = 0;
bitstream1->bitLeft = (WORD_SIZE << 3);
if (status == PV_END_OF_BUF) /* re-position bitstream2 */
{
byteleft = bitstream2->byteCount - numbyte2;
M4VENC_MEMCPY(ptrBS2, ptrBS2 + numbyte2, sizeof(UChar)*byteleft);
bitstream2->byteCount = byteleft;
/* bitstream2->word and bitstream->bitLeft are unchanged.
they should be 0 and (WORD_SIZE<<3) */
}
return status;
}
#ifndef NO_SLICE_ENCODE
/* ======================================================================== */
/* Function : BitstreamRepos( BitstreamEncVideo *bitstream, */
/* Int byteCount, Int bitCount) */
/* Date : 04/28/2002 */
/* Purpose : Reposition the size of the buffer content (curtail) */
/* In/out : */
/* Return : */
/* Modified : */
/* ======================================================================== */
PV_STATUS BitstreamRepos(BitstreamEncVideo *bitstream, Int byteCount, Int bitCount)
{
UChar *ptr, byte;
UInt word;
Int fraction;
BitstreamSavePartial(bitstream, &fraction);
bitstream->byteCount = byteCount;
ptr = bitstream->bitstreamBuffer + byteCount; /* get fraction of the byte */
if (bitCount)
{
bitstream->bitLeft = (WORD_SIZE << 3) - bitCount; /* bitCount should be 0-31 */
word = *ptr++;
byte = *ptr++;
word = byte | (word << 8);
#if (WORD_SIZE == 4)
byte = *ptr++;
word = byte | (word << 8);
byte = *ptr++;
word = byte | (word << 8);
#endif
bitstream->word = word >> (bitstream->bitLeft);
}
else
{
bitstream->word = 0;
bitstream->bitLeft = (WORD_SIZE << 3);
}
return PV_SUCCESS;
}
/* ======================================================================== */
/* Function : BitstreamFlushBits(BitstreamEncVideo *bitstream1, */
/* Int num_bit_left) */
/* Date : 04/24/2002 */
/* Purpose : Flush buffer except the last num_bit_left bits. */
/* In/out : */
/* Return : */
/* Modified : */
/* ======================================================================== */
PV_STATUS BitstreamFlushBits(BitstreamEncVideo *bitstream1, Int num_bit_left)
{
Int i;
UChar *ptrDst, *ptrSrc;
Int leftover, bitused;
Int new_byte = (num_bit_left >> 3);
Int new_bit = num_bit_left - (new_byte << 3); /* between 0-7 */
ptrSrc = bitstream1->bitstreamBuffer + bitstream1->byteCount;
ptrDst = bitstream1->bitstreamBuffer;
bitused = (WORD_SIZE << 3) - bitstream1->bitLeft;
leftover = 8 - bitused; /* bitused should be between 0-7 */
bitstream1->byteCount = new_byte;
bitstream1->bitLeft = (WORD_SIZE << 3) - new_bit;
if (!bitused) /* byte aligned */
{
M4VENC_MEMCPY(ptrDst, ptrSrc, new_byte + 1);
}
else
{
/*copy all the bytes in bitstream2*/
for (i = 0; i < new_byte; i++)
{
*ptrDst++ = (ptrSrc[0] << bitused) | (ptrSrc[1] >> leftover);
ptrSrc++;
}
        /* copy the last byte of ptrSrc; copying extra bits doesn't hurt */
if (new_bit)
{
*ptrDst++ = (ptrSrc[0] << bitused) | (ptrSrc[1] >> leftover);
ptrSrc++;
}
}
if (new_bit)
{
ptrSrc = bitstream1->bitstreamBuffer + new_byte;
bitstream1->word = (*ptrSrc) >> (8 - new_bit);
}
return PV_SUCCESS;
}
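/* ------------------------------------------------------------------------ */
/* Illustrative sketch (not part of the original source): the slice encoder */
/* records the bit position before coding a macroblock and, if the packet   */
/* would overflow, rewinds with BitstreamRepos so that the macroblock is    */
/* re-emitted in the next packet. Compiled out.                             */
/* ------------------------------------------------------------------------ */
#if 0
static void Example_RewindOneMB(BitstreamEncVideo *bs)
{
    Int pos       = BitstreamGetPos(bs);      /* position before the macroblock */
    Int byteCount = pos >> 3;
    Int bitCount  = pos & 0x7;

    /* ... VLC-encode one macroblock into bs ... */

    BitstreamRepos(bs, byteCount, bitCount);  /* rewind: MB goes in the next packet */
}
#endif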
/* ======================================================================== */
/* Function : BitstreamPrependPacket( BitstreamEncVideo *bitstream1, */
/* BitstreamEncVideo *bitstream2 ) */
/* Date : 04/26/2002 */
/* Purpose : Prepend the intermediate bitstream (bitstream2) to the beginning of */
/* output bitstream(bitstream1) */
/* In/out : */
/* Return : */
/* Modified : */
/* ======================================================================== */
PV_STATUS BitstreamPrependPacket(BitstreamEncVideo *bitstream1, BitstreamEncVideo *bitstream2)
{
UChar *pSrc, *pDst, byte;
Int movebyte, bitused, leftover, i, fraction;
BitstreamSavePartial(bitstream2, &fraction); /* make sure only fraction of byte left */
BitstreamSavePartial(bitstream1, &fraction);
if (bitstream1->byteCount + bitstream2->byteCount >= bitstream1->bufferSize)
{
bitstream1->byteCount += bitstream2->byteCount;
return PV_END_OF_BUF;
}
movebyte = bitstream1->byteCount;
if (movebyte < bitstream2->byteCount)
movebyte = bitstream2->byteCount;
movebyte++;
/* shift bitstream1 to the right by movebyte */
pSrc = bitstream1->bitstreamBuffer;
pDst = pSrc + movebyte;
M4VENC_MEMCPY(pDst, pSrc, bitstream1->byteCount + 1);
/* copy bitstream2 to the beginning of bitstream1 */
M4VENC_MEMCPY(pSrc, bitstream2->bitstreamBuffer, bitstream2->byteCount + 1);
/* now shift back previous bitstream1 buffer to the end */
pSrc = pDst;
pDst = bitstream1->bitstreamBuffer + bitstream2->byteCount;
bitused = (WORD_SIZE << 3) - bitstream2->bitLeft;
leftover = 8 - bitused; /* bitused should be 0-7 */
byte = (bitstream2->word) << leftover;
*pDst++ = byte | (pSrc[0] >> bitused);
for (i = 0; i < bitstream1->byteCount + 1; i++)
{
*pDst++ = ((pSrc[0] << leftover) | (pSrc[1] >> bitused));
pSrc++;
}
bitstream1->byteCount += bitstream2->byteCount;
//bitstream1->bitCount += bitstream2->bitCount;
bitused = (WORD_SIZE << 4) - (bitstream1->bitLeft + bitstream2->bitLeft);
if (bitused >= 8)
{
bitused -= 8;
bitstream1->byteCount++;
}
bitstream1->bitLeft = (WORD_SIZE << 3) - bitused;
bitstream2->byteCount = bitstream2->word = 0;
bitstream2->bitLeft = (WORD_SIZE << 3);
pSrc = bitstream1->bitstreamBuffer + bitstream1->byteCount;
leftover = 8 - bitused;
//*pSrc = (pSrc[0]>>leftover)<<leftover; /* make sure the rest of bits are zeros */
bitstream1->word = (UInt)((pSrc[0]) >> leftover);
return PV_SUCCESS;
}
#endif /* NO_SLICE_ENCODE */
/* ======================================================================== */
/* Function : BitstreamGetPos( BitstreamEncVideo *stream ) */
/* Date : 08/05/2004 */
/* Purpose : Get the bit position. */
/* In/out : */
/* Return : */
/* Modified : */
/* ======================================================================== */
Int BitstreamGetPos(BitstreamEncVideo *stream)
{
return stream->byteCount*8 + (WORD_SIZE << 3) - stream->bitLeft;
}
void BitstreamEncReset(BitstreamEncVideo *stream)
{
stream->bitLeft = (WORD_SIZE << 3);
stream->word = 0;
stream->byteCount = 0;
return ;
}
/* This function sets the overrun buffer and the VideoEncData context used by
   the callback to reallocate the overrun buffer. */
Void BitstreamSetOverrunBuffer(BitstreamEncVideo* stream, UChar* overrunBuffer, Int oBSize, VideoEncData *video)
{
stream->overrunBuffer = overrunBuffer;
stream->oBSize = oBSize;
stream->video = video;
return ;
}
/* determine whether overrun buffer can be used or not */
PV_STATUS BitstreamUseOverrunBuffer(BitstreamEncVideo* stream, Int numExtraBytes)
{
VideoEncData *video = stream->video;
if (stream->overrunBuffer != NULL) // overrunBuffer is set
{
if (stream->bitstreamBuffer != stream->overrunBuffer) // not already used
{
if (stream->byteCount + numExtraBytes >= stream->oBSize)
{
stream->oBSize = stream->byteCount + numExtraBytes + 100;
stream->oBSize &= (~0x3); // make it multiple of 4
// allocate new overrun Buffer
if (video->overrunBuffer)
{
M4VENC_FREE(video->overrunBuffer);
}
video->oBSize = stream->oBSize;
video->overrunBuffer = (UChar*) M4VENC_MALLOC(sizeof(UChar) * stream->oBSize);
stream->overrunBuffer = video->overrunBuffer;
if (stream->overrunBuffer == NULL)
{
return PV_FAIL;
}
}
// copy everything to overrun buffer and start using it.
memcpy(stream->overrunBuffer, stream->bitstreamBuffer, stream->byteCount);
stream->bitstreamBuffer = stream->overrunBuffer;
stream->bufferSize = stream->oBSize;
}
else // overrun buffer is already used
{
if (stream->byteCount + numExtraBytes >= stream->oBSize)
{
stream->oBSize = stream->byteCount + numExtraBytes + 100;
}
// allocate new overrun buffer
stream->oBSize &= (~0x3); // make it multiple of 4
video->oBSize = stream->oBSize;
video->overrunBuffer = (UChar*) M4VENC_MALLOC(sizeof(UChar) * stream->oBSize);
if (video->overrunBuffer == NULL)
{
return PV_FAIL;
}
// copy from the old buffer to new buffer
memcpy(video->overrunBuffer, stream->overrunBuffer, stream->byteCount);
// free old buffer
M4VENC_FREE(stream->overrunBuffer);
// assign pointer to new buffer
stream->overrunBuffer = video->overrunBuffer;
stream->bitstreamBuffer = stream->overrunBuffer;
stream->bufferSize = stream->oBSize;
}
return PV_SUCCESS;
}
    else // overrunBuffer is not enabled.
{
return PV_FAIL;
}
}
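/* ------------------------------------------------------------------------ */
/* Illustrative sketch (not part of the original source): attaching an      */
/* overrun buffer so the writer spills into it instead of failing when the  */
/* caller-supplied buffer is too small; the VideoEncData context lets       */
/* BitstreamUseOverrunBuffer reallocate it if it needs to grow further.     */
/* Compiled out; the size is an arbitrary example.                          */
/* ------------------------------------------------------------------------ */
#if 0
static void Example_SetOverrun(BitstreamEncVideo *bs, VideoEncData *video)
{
    Int oBSize = 1024;                                           /* initial spill size */
    UChar *overrun = (UChar*) M4VENC_MALLOC(sizeof(UChar) * oBSize);
    if (overrun == NULL) return;
    BitstreamSetOverrunBuffer(bs, overrun, oBSize, video);
}
#endif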


@ -0,0 +1,57 @@
/* ------------------------------------------------------------------
* Copyright (C) 1998-2009 PacketVideo
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied.
* See the License for the specific language governing permissions
* and limitations under the License.
* -------------------------------------------------------------------
*/
#ifndef _BITSTREAM_IO_H_
#define _BITSTREAM_IO_H_
#define BitstreamPut1Bits(x,y) BitstreamPutBits(x,1,y)
#define BitstreamPutGT8Bits(x,y,z) BitstreamPutBits(x,y,z)
#include "mp4lib_int.h"
#ifdef __cplusplus
extern "C"
{
#endif
BitstreamEncVideo *BitStreamCreateEnc(Int bufferSize);
Void BitstreamCloseEnc(BitstreamEncVideo *stream);
PV_STATUS BitstreamPutBits(BitstreamEncVideo *stream, Int Length, UInt Value);
PV_STATUS BitstreamPutGT16Bits(BitstreamEncVideo *stream, Int Length, ULong Value);
PV_STATUS BitstreamSaveWord(BitstreamEncVideo *stream);
PV_STATUS BitstreamSavePartial(BitstreamEncVideo *stream, Int *fraction);
Int BitstreamGetPos(BitstreamEncVideo *stream);
void BitstreamEncReset(BitstreamEncVideo *stream);
Int BitstreamShortHeaderByteAlignStuffing(BitstreamEncVideo *stream);
Int BitstreamMpeg4ByteAlignStuffing(BitstreamEncVideo *stream);
PV_STATUS BitstreamAppendEnc(BitstreamEncVideo *bitstream1, BitstreamEncVideo *bitstream2);
PV_STATUS BitstreamAppendPacket(BitstreamEncVideo *bitstream1, BitstreamEncVideo *bitstream2);
PV_STATUS BitstreamAppendPacketNoOffset(BitstreamEncVideo *bitstream1, BitstreamEncVideo *bitstream2);
PV_STATUS BitstreamRepos(BitstreamEncVideo *bitstream, Int byteCount, Int bitCount);
PV_STATUS BitstreamFlushBits(BitstreamEncVideo *bitstream1, Int num_bit_left);
PV_STATUS BitstreamPrependPacket(BitstreamEncVideo *bitstream1, BitstreamEncVideo *bitstream2);
Void BitstreamSetOverrunBuffer(BitstreamEncVideo *stream, UChar *overrunBuffer, Int oBSize, VideoEncData *video);
PV_STATUS BitstreamUseOverrunBuffer(BitstreamEncVideo* stream, Int numExtraBytes);
#ifdef __cplusplus
}
#endif
#endif /* _BITSTREAM_IO_H_ */
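/* ------------------------------------------------------------------------ */
/* Illustrative sketch (not part of the original source): a minimal         */
/* end-to-end use of the API declared above, assuming the types from        */
/* mp4lib_int.h. Compiled out; buffer size and values are arbitrary.        */
/* ------------------------------------------------------------------------ */
#if 0
static void Example_BitstreamRoundTrip(void)
{
    BitstreamEncVideo *bs = BitStreamCreateEnc(256);  /* 256-byte buffer          */
    if (bs == NULL) return;
    BitstreamPutBits(bs, 16, 0x0120);                 /* up to 16 bits per call   */
    BitstreamPutGT16Bits(bs, 17, 0x1ABCD);            /* wider codes use GT16     */
    BitstreamMpeg4ByteAlignStuffing(bs);              /* align before flushing    */
    /* BitstreamGetPos(bs) now returns the total number of bits written */
    BitstreamCloseEnc(bs);
}
#endif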


@ -0,0 +1,693 @@
/* ------------------------------------------------------------------
* Copyright (C) 1998-2009 PacketVideo
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied.
* See the License for the specific language governing permissions
* and limitations under the License.
* -------------------------------------------------------------------
*/
#include "mp4def.h"
#include "mp4enc_lib.h"
#include "mp4lib_int.h"
#include "bitstream_io.h"
#include "vlc_encode.h"
#include "m4venc_oscl.h"
PV_STATUS EncodeGOBHeader(VideoEncData *video, Int GOB_number, Int quant_scale, Int bs1stream);
/* ======================================================================== */
/* Function : EncodeFrameCombinedMode() */
/* Date : 09/01/2000 */
/* History : */
/* Purpose : Encode a frame of MPEG4 bitstream in Combined mode. */
/* In/out : */
/* Return : PV_SUCCESS if successful else PV_FAIL */
/* Modified : */
/* */
/* ======================================================================== */
PV_STATUS EncodeFrameCombinedMode(VideoEncData *video)
{
PV_STATUS status = PV_SUCCESS;
Vol *currVol = video->vol[video->currLayer];
Vop *currVop = video->currVop;
VideoEncParams *encParams = video->encParams;
Int width = currVop->width; /* has to be Vop, for multiple of 16 */
Int lx = currVop->pitch; /* with padding */
Int offset = 0;
Int ind_x, ind_y;
Int start_packet_header = 0;
UChar *QPMB = video->QPMB;
Int QP;
Int mbnum = 0, slice_counter = 0, curr_slice_counter = 0;
Int num_bits, packet_size = encParams->ResyncPacketsize;
Int GOB_Header_Interval = encParams->GOB_Header_Interval;
BitstreamEncVideo *bs1 = video->bitstream1;
Int numHeaderBits;
approxDCT fastDCTfunction;
Int ncoefblck[6] = {64, 64, 64, 64, 64, 64}; /* for FastCodeMB, 5/18/2001 */
PV_STATUS(*CodeMB)(VideoEncData *, approxDCT *, Int, Int[]);
void (*MBVlcEncode)(VideoEncData*, Int[], void *);
void (*BlockCodeCoeff)(RunLevelBlock*, BitstreamEncVideo*, Int, Int, UChar);
/* for H263 GOB changes */
//MP4RateControlType rc_type = encParams->RC_Type;
video->QP_prev = currVop->quantizer;
numHeaderBits = BitstreamGetPos(bs1);
/* determine type of quantization */
#ifndef NO_MPEG_QUANT
if (currVol->quantType == 0)
CodeMB = &CodeMB_H263;
else
CodeMB = &CodeMB_MPEG;
#else
CodeMB = &CodeMB_H263;
#endif
/* determine which functions to be used, in MB-level */
if (currVop->predictionType == P_VOP)
MBVlcEncode = &MBVlcEncodeCombined_P_VOP;
else if (currVop->predictionType == I_VOP)
MBVlcEncode = &MBVlcEncodeCombined_I_VOP;
else /* B_VOP not implemented yet */
return PV_FAIL;
/* determine which VLC table to be used */
#ifndef H263_ONLY
if (currVol->shortVideoHeader)
BlockCodeCoeff = &BlockCodeCoeff_ShortHeader;
#ifndef NO_RVLC
else if (currVol->useReverseVLC)
BlockCodeCoeff = &BlockCodeCoeff_RVLC;
#endif
else
BlockCodeCoeff = &BlockCodeCoeff_Normal;
#else
BlockCodeCoeff = &BlockCodeCoeff_ShortHeader;
#endif
/* gob_frame_id is the same for different vop types - the reason should be SCD */
if (currVol->shortVideoHeader && currVop->gobFrameID != currVop->predictionType)
currVop->gobFrameID = currVop->predictionType;
video->usePrevQP = 0;
for (ind_y = 0; ind_y < currVol->nMBPerCol; ind_y++) /* Col MB Loop */
{
video->outputMB->mb_y = ind_y; /* 5/28/01 */
if (currVol->shortVideoHeader) /* ShortVideoHeader Mode */
{
if (slice_counter && GOB_Header_Interval && (ind_y % GOB_Header_Interval == 0)) /* Encode GOB Header */
{
QP = QPMB[mbnum]; /* Get quant_scale */
video->header_bits -= BitstreamGetPos(currVol->stream); /* Header Bits */
status = EncodeGOBHeader(video, slice_counter, QP, 0); //ind_y /* Encode GOB Header */
video->header_bits += BitstreamGetPos(currVol->stream); /* Header Bits */
curr_slice_counter = slice_counter;
}
}
for (ind_x = 0; ind_x < currVol->nMBPerRow; ind_x++) /* Row MB Loop */
{
video->outputMB->mb_x = ind_x; /* 5/28/01 */
video->mbnum = mbnum;
QP = QPMB[mbnum]; /* always read new QP */
if (GOB_Header_Interval)
video->sliceNo[mbnum] = curr_slice_counter; /* Update MB slice number */
else
video->sliceNo[mbnum] = slice_counter;
/****************************************************************************************/
            /* MB Prediction: Put into MC macroblock, subtract from currVop, put in predMB */
/****************************************************************************************/
getMotionCompensatedMB(video, ind_x, ind_y, offset);
#ifndef H263_ONLY
if (start_packet_header)
{
slice_counter++; /* Increment slice counter */
video->sliceNo[mbnum] = slice_counter; /* Update MB slice number*/
video->header_bits -= BitstreamGetPos(bs1); /* Header Bits */
video->QP_prev = currVop->quantizer;
status = EncodeVideoPacketHeader(video, mbnum, video->QP_prev, 0);
video->header_bits += BitstreamGetPos(bs1); /* Header Bits */
numHeaderBits = BitstreamGetPos(bs1);
start_packet_header = 0;
video->usePrevQP = 0;
}
#endif
/***********************************************/
/* Code_MB: DCT, Q, Q^(-1), IDCT, Motion Comp */
/***********************************************/
status = (*CodeMB)(video, &fastDCTfunction, (offset << 5) + QP, ncoefblck);
/************************************/
/* MB VLC Encode: VLC Encode MB */
/************************************/
(*MBVlcEncode)(video, ncoefblck, (void*)BlockCodeCoeff);
/*************************************************************/
/* Assemble Packets: Assemble the MB VLC codes into Packets */
/*************************************************************/
/* Assemble_Packet(video) */
#ifndef H263_ONLY
if (!currVol->shortVideoHeader) /* Not in ShortVideoHeader mode */
{
if (!currVol->ResyncMarkerDisable) /* RESYNC MARKER MODE */
{
num_bits = BitstreamGetPos(bs1) - numHeaderBits;
if (num_bits > packet_size)
{
video->header_bits += BitstreamMpeg4ByteAlignStuffing(bs1); /* Byte align Packet */
status = BitstreamAppendPacket(currVol->stream, bs1); /* Put Packet to Buffer */
/* continue even if status == PV_END_OF_BUF, to get the stats */
BitstreamEncReset(bs1);
start_packet_header = 1;
}
}
else /* NO RESYNC MARKER MODE */
{
status = BitstreamAppendEnc(currVol->stream, bs1); /* Initialize to 0 */
/* continue even if status == PV_END_OF_BUF, to get the stats */
BitstreamEncReset(bs1);
}
}
else
#endif /* H263_ONLY */
{ /* ShortVideoHeader Mode */
status = BitstreamAppendEnc(currVol->stream, bs1); /* Initialize to 0 */
/* continue even if status == PV_END_OF_BUF, to get the stats */
BitstreamEncReset(bs1);
}
mbnum++;
offset += 16;
} /* End of For ind_x */
offset += (lx << 4) - width;
if (currVol->shortVideoHeader) /* ShortVideoHeader = 1 */
{
if (GOB_Header_Interval) slice_counter++;
}
} /* End of For ind_y */
if (currVol->shortVideoHeader) /* ShortVideoHeader = 1 */
{
video->header_bits += BitstreamShortHeaderByteAlignStuffing(currVol->stream); /* Byte Align */
}
#ifndef H263_ONLY
else /* Combined Mode*/
{
if (!currVol->ResyncMarkerDisable) /* Resync Markers */
{
if (!start_packet_header)
{
video->header_bits += BitstreamMpeg4ByteAlignStuffing(bs1);/* Byte Align */
status = BitstreamAppendPacket(currVol->stream, bs1); /* Put Packet to Buffer */
/* continue even if status == PV_END_OF_BUF, to get the stats */
BitstreamEncReset(bs1);
}
}
else /* No Resync Markers */
{
video->header_bits += BitstreamMpeg4ByteAlignStuffing(currVol->stream); /* Byte Align */
}
}
#endif /* H263_ONLY */
return status; /* if status == PV_END_OF_BUF, this frame will be pre-skipped */
}
#ifndef NO_SLICE_ENCODE
/* ======================================================================== */
/* Function : EncodeSliceCombinedMode() */
/* Date : 04/19/2002 */
/* History : */
/* Purpose : Encode a slice of MPEG4 bitstream in Combined mode and save */
/* the current MB to continue next time it is called. */
/* In/out : */
/* Return : PV_SUCCESS if successful else PV_FAIL */
/* Modified : */
/* */
/* ======================================================================== */
PV_STATUS EncodeSliceCombinedMode(VideoEncData *video)
{
PV_STATUS status = PV_SUCCESS;
Vol *currVol = video->vol[video->currLayer];
Vop *currVop = video->currVop;
UChar mode = MODE_INTRA;
UChar *Mode = video->headerInfo.Mode;
VideoEncParams *encParams = video->encParams;
Int nTotalMB = currVol->nTotalMB;
Int width = currVop->width; /* has to be Vop, for multiple of 16 */
Int lx = currVop->pitch; /* , with padding */
// rateControl *rc = encParams->rc[video->currLayer];
UChar *QPMB = video->QPMB;
Int QP;
Int ind_x = video->outputMB->mb_x, ind_y = video->outputMB->mb_y;
Int offset = video->offset; /* get current MB location */
Int mbnum = video->mbnum, slice_counter = video->sliceNo[mbnum]; /* get current MB location */
Int firstMB = mbnum;
Int start_packet_header = 0;
Int num_bits = 0;
Int packet_size = encParams->ResyncPacketsize - 1;
Int resync_marker = ((!currVol->shortVideoHeader) && (!currVol->ResyncMarkerDisable));
BitstreamEncVideo *bs1 = video->bitstream1;
Int byteCount = 0, byteCount1 = 0, bitCount = 0;
Int numHeaderBits = 0;
approxDCT fastDCTfunction;
Int ncoefblck[6] = {64, 64, 64, 64, 64, 64}; /* for FastCodeMB, 5/18/2001 */
UChar CBP = 0;
Short outputMB[6][64];
Int k;
PV_STATUS(*CodeMB)(VideoEncData *, approxDCT *, Int, Int[]);
void (*MBVlcEncode)(VideoEncData*, Int[], void *);
void (*BlockCodeCoeff)(RunLevelBlock*, BitstreamEncVideo*, Int, Int, UChar);
video->QP_prev = 31;
#define H263_GOB_CHANGES
if (video->end_of_buf) /* left-over from previous run */
{
status = BitstreamAppendPacketNoOffset(currVol->stream, bs1);
if (status != PV_END_OF_BUF)
{
BitstreamEncReset(bs1);
video->end_of_buf = 0;
}
return status;
}
if (mbnum == 0) /* only do this at the start of a frame */
{
QPMB[0] = video->QP_prev = QP = currVop->quantizer;
video->usePrevQP = 0;
numHeaderBits = BitstreamGetPos(bs1);
}
/* Re-assign fast functions on every slice, don't have to put it in the memory */
QP = QPMB[mbnum];
if (mbnum > 0) video->QP_prev = QPMB[mbnum-1];
/* determine type of quantization */
#ifndef NO_MPEG_QUANT
if (currVol->quantType == 0)
CodeMB = &CodeMB_H263;
else
CodeMB = &CodeMB_MPEG;
#else
CodeMB = &CodeMB_H263;
#endif
/* determine which functions to be used, in MB-level */
if (currVop->predictionType == P_VOP)
MBVlcEncode = &MBVlcEncodeCombined_P_VOP;
else if (currVop->predictionType == I_VOP)
MBVlcEncode = &MBVlcEncodeCombined_I_VOP;
else /* B_VOP not implemented yet */
return PV_FAIL;
/* determine which VLC table to be used */
#ifndef H263_ONLY
if (currVol->shortVideoHeader)
BlockCodeCoeff = &BlockCodeCoeff_ShortHeader;
#ifndef NO_RVLC
else if (currVol->useReverseVLC)
BlockCodeCoeff = &BlockCodeCoeff_RVLC;
#endif
else
BlockCodeCoeff = &BlockCodeCoeff_Normal;
#else
BlockCodeCoeff = &BlockCodeCoeff_ShortHeader;
#endif
/* (gob_frame_id is the same for different vop types) The reason should be SCD */
if (currVol->shortVideoHeader && currVop->gobFrameID != currVop->predictionType)
currVop->gobFrameID = currVop->predictionType;
if (mbnum != 0)
{
if (currVol->shortVideoHeader)
{
/* Encode GOB Header */
bitCount = BitstreamGetPos(bs1);
byteCount1 = byteCount = bitCount >> 3; /* save the position before GOB header */
bitCount = bitCount & 0x7;
#ifdef H263_GOB_CHANGES
video->header_bits -= BitstreamGetPos(bs1); /* Header Bits */
status = EncodeGOBHeader(video, slice_counter, QP, 1); //ind_y /* Encode GOB Header */
video->header_bits += BitstreamGetPos(bs1); /* Header Bits */
#endif
goto JUMP_IN_SH;
}
else if (currVol->ResyncMarkerDisable)
{
goto JUMP_IN_SH;
}
else
{
start_packet_header = 1;
goto JUMP_IN;
}
}
for (ind_y = 0; ind_y < currVol->nMBPerCol; ind_y++) /* Col MB Loop */
{
video->outputMB->mb_y = ind_y; /* 5/28/01, do not remove */
for (ind_x = 0; ind_x < currVol->nMBPerRow; ind_x++) /* Row MB Loop */
{
video->outputMB->mb_x = ind_x; /* 5/28/01, do not remove */
video->mbnum = mbnum;
video->sliceNo[mbnum] = slice_counter; /* Update MB slice number */
JUMP_IN_SH:
/****************************************************************************************/
            /* MB Prediction: Put into MC macroblock, subtract from currVop, put in predMB */
/****************************************************************************************/
getMotionCompensatedMB(video, ind_x, ind_y, offset);
JUMP_IN:
QP = QPMB[mbnum]; /* always read new QP */
#ifndef H263_ONLY
if (start_packet_header)
{
slice_counter++; /* Increment slice counter */
video->sliceNo[mbnum] = slice_counter; /* Update MB slice number*/
video->QP_prev = currVop->quantizer; /* store QP */
num_bits = BitstreamGetPos(bs1);
status = EncodeVideoPacketHeader(video, mbnum, video->QP_prev, 1);
numHeaderBits = BitstreamGetPos(bs1) - num_bits;
video->header_bits += numHeaderBits; /* Header Bits */
start_packet_header = 0;
video->usePrevQP = 0;
}
else /* don't encode the first MB in packet again */
#endif /* H263_ONLY */
{
/***********************************************/
/* Code_MB: DCT, Q, Q^(-1), IDCT, Motion Comp */
/***********************************************/
status = (*CodeMB)(video, &fastDCTfunction, (offset << 5) + QP, ncoefblck);
}
/************************************/
/* MB VLC Encode: VLC Encode MB */
/************************************/
/* save the state before VLC encoding */
if (resync_marker)
{
bitCount = BitstreamGetPos(bs1);
byteCount = bitCount >> 3; /* save the state before encoding */
bitCount = bitCount & 0x7;
mode = Mode[mbnum];
CBP = video->headerInfo.CBP[mbnum];
for (k = 0; k < 6; k++)
{
M4VENC_MEMCPY(outputMB[k], video->outputMB->block[k], sizeof(Short) << 6);
}
}
/*************************************/
(*MBVlcEncode)(video, ncoefblck, (void*)BlockCodeCoeff);
/*************************************************************/
/* Assemble Packets: Assemble the MB VLC codes into Packets */
/*************************************************************/
/* Assemble_Packet(video) */
#ifndef H263_ONLY
if (!currVol->shortVideoHeader)
{
if (!currVol->ResyncMarkerDisable)
{
/* Not in ShortVideoHeader mode and RESYNC MARKER MODE */
num_bits = BitstreamGetPos(bs1) ;//- numHeaderBits; // include header
/* Assemble packet and return when size reached */
if (num_bits > packet_size && mbnum != firstMB)
{
BitstreamRepos(bs1, byteCount, bitCount); /* rewind one MB */
video->header_bits += BitstreamMpeg4ByteAlignStuffing(bs1); /* Byte align Packet */
status = BitstreamAppendPacketNoOffset(currVol->stream, bs1); /* Put Packet to Buffer */
if (status == PV_END_OF_BUF)
{
video->end_of_buf = 1;
}
else
{
BitstreamEncReset(bs1);
}
start_packet_header = 1;
if (mbnum < nTotalMB || video->end_of_buf) /* return here */
{
video->mbnum = mbnum;
video->sliceNo[mbnum] = slice_counter;
video->offset = offset;
Mode[mbnum] = mode;
video->headerInfo.CBP[mbnum] = CBP;
for (k = 0; k < 6; k++)
{
M4VENC_MEMCPY(video->outputMB->block[k], outputMB[k], sizeof(Short) << 6);
}
return status;
}
}
}
else /* NO RESYNC MARKER , return when buffer is full*/
{
if (mbnum < nTotalMB - 1 && currVol->stream->byteCount + bs1->byteCount + 1 >= currVol->stream->bufferSize)
{
/* find maximum bytes to fit in the buffer */
byteCount = currVol->stream->bufferSize - currVol->stream->byteCount - 1;
num_bits = BitstreamGetPos(bs1) - (byteCount << 3);
BitstreamRepos(bs1, byteCount, 0);
status = BitstreamAppendPacketNoOffset(currVol->stream, bs1);
BitstreamFlushBits(bs1, num_bits);
/* move on to next MB */
mbnum++ ;
offset += 16;
video->outputMB->mb_x++;
if (video->outputMB->mb_x >= currVol->nMBPerRow)
{
video->outputMB->mb_x = 0;
video->outputMB->mb_y++;
offset += (lx << 4) - width;
}
video->mbnum = mbnum;
video->offset = offset;
video->sliceNo[mbnum] = slice_counter;
return status;
}
}
}
#endif /* H263_ONLY */
offset += 16;
mbnum++; /* has to increment before SCD, to preserve Mode[mbnum] */
} /* End of For ind_x */
offset += (lx << 4) - width;
if (currVol->shortVideoHeader) /* ShortVideoHeader = 1 */
{
#ifdef H263_GOB_CHANGES
slice_counter++;
video->header_bits += BitstreamShortHeaderByteAlignStuffing(bs1);
#endif
//video->header_bits+=BitstreamShortHeaderByteAlignStuffing(bs1);
/* check if time to packetize */
if (currVol->stream->byteCount + bs1->byteCount > currVol->stream->bufferSize)
{
if (byteCount == byteCount1) /* a single GOB bigger than packet size */
{
status = BitstreamAppendPacketNoOffset(currVol->stream, bs1);
status = PV_END_OF_BUF;
video->end_of_buf = 1;
start_packet_header = 1;
}
else /* for short_header scooch back to previous GOB */
{
num_bits = ((bs1->byteCount - byteCount) << 3);
//num_bits = ((bs1->byteCount<<3) + bs1->bitCount) - ((byteCount<<3) + bitCount);
BitstreamRepos(bs1, byteCount, 0);
//BitstreamRepos(bs1,byteCount,bitCount);
// k = currVol->stream->byteCount; /* save state before appending */
status = BitstreamAppendPacketNoOffset(currVol->stream, bs1);
BitstreamFlushBits(bs1, num_bits);
// if(mbnum == nTotalMB || k + bs1->byteCount >= currVol->stream->bufferSize){
/* last GOB or current one with larger size will be returned next run */
// status = PV_END_OF_BUF;
// video->end_of_buf = 1;
// }
start_packet_header = 1;
if (mbnum == nTotalMB) /* there's one more GOB to packetize for the next round */
{
status = PV_END_OF_BUF;
video->end_of_buf = 1;
}
}
if (mbnum < nTotalMB) /* return here */
{
/* move on to next MB */
video->outputMB->mb_x = 0;
video->outputMB->mb_y++;
video->mbnum = mbnum;
video->offset = offset;
video->sliceNo[mbnum] = slice_counter;
return status;
}
}
else if (mbnum < nTotalMB) /* do not write GOB header if end of vop */
{
bitCount = BitstreamGetPos(bs1);
byteCount = bitCount >> 3; /* save the position before GOB header */
bitCount = bitCount & 0x7;
#ifdef H263_GOB_CHANGES
video->header_bits -= BitstreamGetPos(bs1); /* Header Bits */
status = EncodeGOBHeader(video, slice_counter, QP, 1); /* Encode GOB Header */
video->header_bits += BitstreamGetPos(bs1); /* Header Bits */
#endif
}
}
} /* End of For ind_y */
#ifndef H263_ONLY
if (!currVol->shortVideoHeader) /* Combined Mode*/
{
if (!currVol->ResyncMarkerDisable) /* Resync Markers */
{
if (!start_packet_header)
{
video->header_bits += BitstreamMpeg4ByteAlignStuffing(bs1);/* Byte Align */
status = BitstreamAppendPacketNoOffset(currVol->stream, bs1); /* Put Packet to Buffer */
if (status == PV_END_OF_BUF)
{
video->end_of_buf = 1;
}
else
{
BitstreamEncReset(bs1);
}
}
}
else /* No Resync Markers */
{
video->header_bits += BitstreamMpeg4ByteAlignStuffing(bs1); /* Byte Align */
status = BitstreamAppendPacketNoOffset(currVol->stream, bs1); /* Initialize to 0 */
if (status == PV_END_OF_BUF)
{
video->end_of_buf = 1;
}
else
{
BitstreamEncReset(bs1);
}
}
}
else
#endif /* H263_ONLY */
{
if (!start_packet_header) /* not yet packetized */
{
video->header_bits += BitstreamShortHeaderByteAlignStuffing(bs1);
status = BitstreamAppendPacketNoOffset(currVol->stream, bs1);
if (status == PV_END_OF_BUF)
{
video->end_of_buf = 1;
}
else
{
BitstreamEncReset(bs1);
video->end_of_buf = 0;
}
}
}
video->mbnum = mbnum;
if (mbnum < nTotalMB)
video->sliceNo[mbnum] = slice_counter;
video->offset = offset;
return status;
}
#endif /* NO_SLICE_ENCODE */
/* ======================================================================== */
/* Function : EncodeGOBHeader() */
/* Date : 09/05/2000 */
/* History : */
/* Purpose : Encode the GOB header for the current GOB (short video header). */
/* In/out : */
/* Return : PV_SUCCESS if successful else PV_FAIL */
/* Modified : */
/* */
/* ======================================================================== */
PV_STATUS EncodeGOBHeader(VideoEncData *video, Int GOB_number, Int quant_scale, Int bs1stream)
{
PV_STATUS status = PV_SUCCESS;
BitstreamEncVideo *stream = (bs1stream ? video->bitstream1 : video->vol[video->currLayer]->stream);
status = BitstreamPutGT16Bits(stream, 17, GOB_RESYNC_MARKER); /* gob_resync_marker */
status = BitstreamPutBits(stream, 5, GOB_number); /* Current gob_number */
status = BitstreamPutBits(stream, 2, video->currVop->gobFrameID); /* gob_frame_id */
status = BitstreamPutBits(stream, 5, quant_scale); /* quant_scale */
return status;
}
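/* ------------------------------------------------------------------------ */
/* Illustrative sketch (not part of the original source): the GOB header    */
/* written above is 29 bits before stuffing, broken down as follows.        */
/* Compiled out.                                                            */
/* ------------------------------------------------------------------------ */
#if 0
static Int Example_GOBHeaderBits(void)
{
    return 17   /* gob_resync_marker */
         +  5   /* gob_number        */
         +  2   /* gob_frame_id      */
         +  5;  /* quant_scale       */
}
#endif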


@ -0,0 +1,482 @@
/* ------------------------------------------------------------------
* Copyright (C) 1998-2009 PacketVideo
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied.
* See the License for the specific language governing permissions
* and limitations under the License.
* -------------------------------------------------------------------
*/
#ifndef H263_ONLY
#include "mp4def.h"
#include "mp4lib_int.h"
#include "bitstream_io.h"
#include "mp4enc_lib.h"
#include "m4venc_oscl.h"
/* ======================================================================== */
/* Function : EncodeFrameDataPartMode() */
/* Date : 09/6/2000 */
/* History : */
/* Purpose : Encode a frame of MPEG4 bitstream in data partitioning mode. */
/* In/out : */
/* Return : PV_SUCCESS if successful else PV_FAIL */
/* Modified : */
/* */
/* ======================================================================== */
PV_STATUS EncodeFrameDataPartMode(VideoEncData *video)
{
PV_STATUS status = PV_SUCCESS;
Vol *currVol = video->vol[video->currLayer];
Vop *currVop = video->currVop;
VideoEncParams *encParams = video->encParams;
Int width = currVop->width; /* has to be Vop, for multiple of 16 */
Int lx = currVop->pitch; /* with padding */
Int offset = 0;
Int ind_x, ind_y;
Int start_packet_header = 0;
UChar *QPMB = video->QPMB;
Int QP;
Int mbnum = 0, slice_counter = 0;
Int num_bits, packet_size = encParams->ResyncPacketsize;
BitstreamEncVideo *bs1 = video->bitstream1;
BitstreamEncVideo *bs2 = video->bitstream2;
BitstreamEncVideo *bs3 = video->bitstream3;
Int numHeaderBits;
approxDCT fastDCTfunction;
Int ncoefblck[6] = {64, 64, 64, 64, 64, 64}; /* for FastCodeMB, 5/18/2001 */
PV_STATUS(*CodeMB)(VideoEncData *, approxDCT *, Int, Int[]);
void (*MBVlcEncode)(VideoEncData*, Int[], void *);
void (*BlockCodeCoeff)(RunLevelBlock*, BitstreamEncVideo*, Int, Int, UChar);
video->QP_prev = currVop->quantizer;
numHeaderBits = BitstreamGetPos(bs1); /* Number of bits in VOP Header */
/* determine type of quantization */
#ifndef NO_MPEG_QUANT
if (currVol->quantType == 0)
CodeMB = &CodeMB_H263;
else
CodeMB = &CodeMB_MPEG;
#else
CodeMB = &CodeMB_H263;
#endif
/* determine which functions to be used, in MB-level */
if (currVop->predictionType == P_VOP)
MBVlcEncode = &MBVlcEncodeDataPar_P_VOP;
else if (currVop->predictionType == I_VOP)
MBVlcEncode = &MBVlcEncodeDataPar_I_VOP;
else /* B_VOP not implemented yet */
return PV_FAIL;
/* determine which VLC table to be used */
if (currVol->shortVideoHeader)
BlockCodeCoeff = &BlockCodeCoeff_ShortHeader;
#ifndef NO_RVLC
else if (currVol->useReverseVLC)
BlockCodeCoeff = &BlockCodeCoeff_RVLC;
#endif
else
BlockCodeCoeff = &BlockCodeCoeff_Normal;
video->usePrevQP = 0;
for (ind_y = 0; ind_y < currVol->nMBPerCol; ind_y++) /* Col MB Loop */
{
video->outputMB->mb_y = ind_y; /* 5/28/01 */
for (ind_x = 0; ind_x < currVol->nMBPerRow; ind_x++) /* Row MB Loop */
{
video->outputMB->mb_x = ind_x; /* 5/28/01 */
video->mbnum = mbnum;
video->sliceNo[mbnum] = slice_counter; /* Update MB slice number */
QP = QPMB[mbnum]; /* always read new QP */
/****************************************************************************************/
            /* MB Prediction: Put into MC macroblock, subtract from currVop, put in predMB */
/****************************************************************************************/
getMotionCompensatedMB(video, ind_x, ind_y, offset);
if (start_packet_header)
{
slice_counter++; /* Increment slice counter */
video->sliceNo[mbnum] = slice_counter; /* Update MB slice number*/
video->header_bits -= BitstreamGetPos(bs1); /* Header Bits */
video->QP_prev = currVop->quantizer; /* store QP */
status = EncodeVideoPacketHeader(video, mbnum, video->QP_prev, 0);
video->header_bits += BitstreamGetPos(bs1); /* Header Bits */
numHeaderBits = BitstreamGetPos(bs1);
start_packet_header = 0;
video->usePrevQP = 0;
}
/***********************************************/
/* Code_MB: DCT, Q, Q^(-1), IDCT, Motion Comp */
/***********************************************/
status = (*CodeMB)(video, &fastDCTfunction, (offset << 5) + QP, ncoefblck);
/************************************/
/* MB VLC Encode: VLC Encode MB */
/************************************/
MBVlcEncode(video, ncoefblck, (void*)BlockCodeCoeff);
/*************************************************************/
/* Assemble Packets: Assemble the MB VLC codes into Packets */
/*************************************************************/
/* INCLUDE VOP HEADER IN COUNT */
num_bits = BitstreamGetPos(bs1) + BitstreamGetPos(bs2) +
BitstreamGetPos(bs3) - numHeaderBits;
/* Assemble_Packet(video) */
if (num_bits > packet_size)
{
if (video->currVop->predictionType == I_VOP)
BitstreamPutGT16Bits(bs1, 19, DC_MARKER); /* Add dc_marker */
else
BitstreamPutGT16Bits(bs1, 17, MOTION_MARKER_COMB); /*Add motion_marker*/
BitstreamAppendEnc(bs1, bs2); /* Combine bs1 and bs2 */
BitstreamAppendEnc(bs1, bs3); /* Combine bs1 and bs3 */
video->header_bits += BitstreamMpeg4ByteAlignStuffing(bs1); /* Byte align Packet */
status = BitstreamAppendPacket(currVol->stream, bs1); /* Put Packet to Buffer */
/* continue even if status == PV_END_OF_BUF, to get the stats */
BitstreamEncReset(bs1); /* Initialize to 0 */
BitstreamEncReset(bs2);
BitstreamEncReset(bs3);
start_packet_header = 1;
}
mbnum++;
offset += 16;
} /* End of For ind_x */
offset += (lx << 4) - width;
} /* End of For ind_y */
if (!start_packet_header)
{
if (video->currVop->predictionType == I_VOP)
{
BitstreamPutGT16Bits(bs1, 19, DC_MARKER); /* Add dc_marker */
video->header_bits += 19;
}
else
{
BitstreamPutGT16Bits(bs1, 17, MOTION_MARKER_COMB); /* Add motion_marker */
video->header_bits += 17;
}
BitstreamAppendEnc(bs1, bs2);
BitstreamAppendEnc(bs1, bs3);
video->header_bits += BitstreamMpeg4ByteAlignStuffing(bs1); /* Byte align Packet */
status = BitstreamAppendPacket(currVol->stream, bs1); /* Put Packet to Buffer */
/* continue even if status == PV_END_OF_BUF, to get the stats */
BitstreamEncReset(bs1); /* Initialize to 0 */
BitstreamEncReset(bs2);
BitstreamEncReset(bs3);
}
return status; /* if status == PV_END_OF_BUF, this frame will be pre-skipped */
}
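/* ------------------------------------------------------------------------ */
/* Illustrative sketch (not part of the original source): in data           */
/* partitioning mode a video packet is assembled from three partial         */
/* bitstreams separated by a marker that depends on the VOP type, roughly:  */
/*     bs1 | dc_marker(19) or motion_marker(17) | bs2 | bs3 | stuffing      */
/* This mirrors the assembly steps in EncodeFrameDataPartMode above.        */
/* Compiled out; error handling omitted.                                    */
/* ------------------------------------------------------------------------ */
#if 0
static void Example_AssembleDataPartPacket(BitstreamEncVideo *out,
                                           BitstreamEncVideo *bs1,
                                           BitstreamEncVideo *bs2,
                                           BitstreamEncVideo *bs3,
                                           Int isIntra)
{
    if (isIntra)
        BitstreamPutGT16Bits(bs1, 19, DC_MARKER);           /* I-VOP partitions split at dc_marker     */
    else
        BitstreamPutGT16Bits(bs1, 17, MOTION_MARKER_COMB);  /* P-VOP partitions split at motion_marker */
    BitstreamAppendEnc(bs1, bs2);                           /* texture partitions follow               */
    BitstreamAppendEnc(bs1, bs3);
    BitstreamMpeg4ByteAlignStuffing(bs1);                   /* byte-align the packet                   */
    BitstreamAppendPacket(out, bs1);                        /* copy into the output stream             */
}
#endif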
#ifndef NO_SLICE_ENCODE
/* ======================================================================== */
/* Function : EncodeSliceDataPartMode() */
/* Date : 04/19/2002 */
/* History : */
/* Purpose : Encode a slice of MPEG4 bitstream in DataPar mode and save */
/* the current MB to continue next time it is called. */
/* In/out : */
/* Return : PV_SUCCESS if successful else PV_FAIL */
/* Modified : */
/* */
/* ======================================================================== */
PV_STATUS EncodeSliceDataPartMode(VideoEncData *video)
{
PV_STATUS status = PV_SUCCESS;
Vol *currVol = video->vol[video->currLayer];
Vop *currVop = video->currVop;
UChar mode, *Mode = video->headerInfo.Mode;
VideoEncParams *encParams = video->encParams;
Int nTotalMB = currVol->nTotalMB;
Int width = currVop->width; /* has to be Vop, for multiple of 16 */
    Int lx = currVop->pitch; /* with padding */
UChar *QPMB = video->QPMB;
Int QP;
Int ind_x = video->outputMB->mb_x, ind_y = video->outputMB->mb_y;
Int offset = video->offset; /* get current MB location */
Int mbnum = video->mbnum, slice_counter = video->sliceNo[mbnum]; /* get current MB location */
Int firstMB = mbnum;
Int start_packet_header = (mbnum != 0);
Int num_bits = 0;
Int packet_size = encParams->ResyncPacketsize - 1 - (currVop->predictionType == I_VOP ? 19 : 17);
BitstreamEncVideo *bs1 = video->bitstream1;
BitstreamEncVideo *bs2 = video->bitstream2;
BitstreamEncVideo *bs3 = video->bitstream3;
Int bitCount1 = 0, bitCount2 = 0, bitCount3 = 0, byteCount1 = 0, byteCount2 = 0, byteCount3 = 0;
Int numHeaderBits = 0;
approxDCT fastDCTfunction;
Int ncoefblck[6] = {64, 64, 64, 64, 64, 64}; /* for FastCodeMB, 5/18/2001 */
UChar CBP;
Short outputMB[6][64];
PV_STATUS(*CodeMB)(VideoEncData *, approxDCT *, Int, Int[]);
void (*MBVlcEncode)(VideoEncData*, Int[], void *);
void (*BlockCodeCoeff)(RunLevelBlock*, BitstreamEncVideo*, Int, Int, UChar);
Int k;
video->QP_prev = 31;
if (video->end_of_buf) /* left-over from previous run */
{
status = BitstreamAppendPacketNoOffset(currVol->stream, bs1);
if (status != PV_END_OF_BUF)
{
BitstreamEncReset(bs1);
video->end_of_buf = 0;
}
return status;
}
if (mbnum == 0) /* only do this at the start of a frame */
{
QPMB[0] = video->QP_prev = QP = currVop->quantizer;
video->usePrevQP = 0;
numHeaderBits = BitstreamGetPos(bs1); /* Number of bits in VOP Header */
}
/* Re-assign fast functions on every slice, don't have to put it in the memory */
QP = QPMB[mbnum];
if (mbnum > 0) video->QP_prev = QPMB[mbnum-1];
/* determine type of quantization */
#ifndef NO_MPEG_QUANT
if (currVol->quantType == 0)
CodeMB = &CodeMB_H263;
else
CodeMB = &CodeMB_MPEG;
#else
CodeMB = &CodeMB_H263;
#endif
/* determine which functions to be used, in MB-level */
if (currVop->predictionType == P_VOP)
MBVlcEncode = &MBVlcEncodeDataPar_P_VOP;
else if (currVop->predictionType == I_VOP)
MBVlcEncode = &MBVlcEncodeDataPar_I_VOP;
else /* B_VOP not implemented yet */
return PV_FAIL;
/* determine which VLC table to be used */
#ifndef NO_RVLC
if (currVol->useReverseVLC)
BlockCodeCoeff = &BlockCodeCoeff_RVLC;
else
#endif
BlockCodeCoeff = &BlockCodeCoeff_Normal;
if (mbnum != 0)
{
goto JUMP_IN;
}
for (ind_y = 0; ind_y < currVol->nMBPerCol; ind_y++) /* Col MB Loop */
{
video->outputMB->mb_y = ind_y; /* 5/28/01 */
for (ind_x = 0; ind_x < currVol->nMBPerRow; ind_x++) /* Row MB Loop */
{
video->outputMB->mb_x = ind_x; /* 5/28/01 */
video->mbnum = mbnum;
video->sliceNo[mbnum] = slice_counter; /* Update MB slice number */
/****************************************************************************************/
            /* MB Prediction: Put into MC macroblock, subtract from currVop, put in predMB */
/****************************************************************************************/
getMotionCompensatedMB(video, ind_x, ind_y, offset);
JUMP_IN:
QP = QPMB[mbnum]; /* always read new QP */
if (start_packet_header)
{
slice_counter++; /* Increment slice counter */
video->sliceNo[mbnum] = slice_counter; /* Update MB slice number*/
video->QP_prev = currVop->quantizer; /* store QP */
num_bits = BitstreamGetPos(bs1);
status = EncodeVideoPacketHeader(video, mbnum, video->QP_prev, 0);
numHeaderBits = BitstreamGetPos(bs1) - num_bits;
video->header_bits += numHeaderBits; /* Header Bits */
start_packet_header = 0;
video->usePrevQP = 0;
}
else /* don't encode the first MB in packet again */
{
/***********************************************/
/* Code_MB: DCT, Q, Q^(-1), IDCT, Motion Comp */
/***********************************************/
status = (*CodeMB)(video, &fastDCTfunction, (offset << 5) + QP, ncoefblck);
for (k = 0; k < 6; k++)
{
M4VENC_MEMCPY(outputMB[k], video->outputMB->block[k], sizeof(Short) << 6);
}
}
/************************************/
/* MB VLC Encode: VLC Encode MB */
/************************************/
/* save the state before VLC encoding */
bitCount1 = BitstreamGetPos(bs1);
bitCount2 = BitstreamGetPos(bs2);
bitCount3 = BitstreamGetPos(bs3);
byteCount1 = bitCount1 >> 3;
byteCount2 = bitCount2 >> 3;
byteCount3 = bitCount3 >> 3;
bitCount1 &= 0x7;
bitCount2 &= 0x7;
bitCount3 &= 0x7;
mode = Mode[mbnum];
CBP = video->headerInfo.CBP[mbnum];
/*************************************/
MBVlcEncode(video, ncoefblck, (void*)BlockCodeCoeff);
/*************************************************************/
/* Assemble Packets: Assemble the MB VLC codes into Packets */
/*************************************************************/
num_bits = BitstreamGetPos(bs1) + BitstreamGetPos(bs2) +
BitstreamGetPos(bs3);// - numHeaderBits; //include header bits
/* Assemble_Packet(video) */
if (num_bits > packet_size && mbnum != firstMB) /* encoding at least one more MB*/
{
BitstreamRepos(bs1, byteCount1, bitCount1); /* rewind one MB */
BitstreamRepos(bs2, byteCount2, bitCount2); /* rewind one MB */
BitstreamRepos(bs3, byteCount3, bitCount3); /* rewind one MB */
if (video->currVop->predictionType == I_VOP)
{
BitstreamPutGT16Bits(bs1, 19, DC_MARKER); /* Add dc_marker */
video->header_bits += 19;
}
else
{
BitstreamPutGT16Bits(bs1, 17, MOTION_MARKER_COMB); /*Add motion_marker*/
video->header_bits += 17;
}
status = BitstreamAppendEnc(bs1, bs2); /* Combine with bs2 */
status = BitstreamAppendEnc(bs1, bs3); /* Combine with bs3 */
video->header_bits += BitstreamMpeg4ByteAlignStuffing(bs1); /* Byte align Packet */
status = BitstreamAppendPacketNoOffset(currVol->stream, bs1);
BitstreamEncReset(bs2);
BitstreamEncReset(bs3);
if (status == PV_END_OF_BUF) /* if cannot fit a buffer */
{
video->end_of_buf = 1;
}
else
{
BitstreamEncReset(bs1);
}
start_packet_header = 1;
if (mbnum < nTotalMB || video->end_of_buf) /* return here */
{
video->mbnum = mbnum;
video->sliceNo[mbnum] = slice_counter;
video->offset = offset;
Mode[mbnum] = mode;
video->headerInfo.CBP[mbnum] = CBP;
for (k = 0; k < 6; k++)
{
M4VENC_MEMCPY(video->outputMB->block[k], outputMB[k], sizeof(Short) << 6);
}
return status;
}
}
offset += 16;
mbnum++; /* has to increment before SCD, to preserve Mode[mbnum] */
} /* End of For ind_x */
offset += (lx << 4) - width;
} /* End of For ind_y */
if (!start_packet_header)
{
if (video->currVop->predictionType == I_VOP)
{
BitstreamPutGT16Bits(bs1, 19, DC_MARKER); /* Add dc_marker */
video->header_bits += 19;
}
else
{
BitstreamPutGT16Bits(bs1, 17, MOTION_MARKER_COMB); /*Add motion_marker*/
video->header_bits += 17;
}
status = BitstreamAppendEnc(bs1, bs2); /* Combine with bs2 */
status = BitstreamAppendEnc(bs1, bs3); /* Combine with bs3 */
video->header_bits += BitstreamMpeg4ByteAlignStuffing(bs1); /* Byte align Packet */
status = BitstreamAppendPacketNoOffset(currVol->stream, bs1);
BitstreamEncReset(bs2);
BitstreamEncReset(bs3);
if (status == PV_END_OF_BUF)
{
video->end_of_buf = 1;
}
else
{
BitstreamEncReset(bs1);
}
}
video->mbnum = mbnum;
if (mbnum < nTotalMB)
video->sliceNo[mbnum] = slice_counter;
video->offset = offset;
return status;
}
#endif /* NO_SLICE_ENCODE */
#endif /* H263_ONLY */

File diff suppressed because it is too large


@ -0,0 +1,191 @@
/* ------------------------------------------------------------------
* Copyright (C) 1998-2009 PacketVideo
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied.
* See the License for the specific language governing permissions
* and limitations under the License.
* -------------------------------------------------------------------
*/
#ifndef _DCT_H_
#define _DCT_H_
const static Int ColThInter[32] = {0, 0x1C, 0x4C, 0x6C, 0x9C, 0xBC, 0xEC, 0x10C,
0x13C, 0x15C, 0x18C, 0x1AC, 0x1DC, 0x1FC, 0x22C, 0x24C,
0x27C, 0x29C, 0x2CC, 0x2EC, 0x31C, 0x33C, 0x36C, 0x38C,
0x3BC, 0x3DC, 0x40C, 0x42C, 0x45C, 0x47C, 0x4AC, 0x4CC
};
const static Int ColThIntra[32] = {0, 0x1C, 0x3C, 0x5C, 0x7C, 0x9C, 0xBC, 0xDC,
0xFC, 0x11C, 0x13C, 0x15C, 0x17C, 0x19C, 0x1BC, 0x1DC,
0x1FC, 0x21C, 0x23C, 0x25C, 0x27C, 0x29C, 0x2BC, 0x2DC,
0x2FC, 0x31C, 0x33C, 0x35C, 0x37C, 0x39C, 0x3BC, 0x3DC
};
/******************************************************/
/********** IDCT part **************************/
const static unsigned char imask[8] = {128, 64, 32, 16, 8, 4, 2, 1};
const static unsigned char mask[8] = {0x7f, 0xbf, 0xdf, 0xef, 0xf7, 0xfb, 0xfd, 0xfe};
#define W1 2841 /* 2048*sqrt(2)*cos(1*pi/16) */
#define W2 2676 /* 2048*sqrt(2)*cos(2*pi/16) */
#define W3 2408 /* 2048*sqrt(2)*cos(3*pi/16) */
#define W5 1609 /* 2048*sqrt(2)*cos(5*pi/16) */
#define W6 1108 /* 2048*sqrt(2)*cos(6*pi/16) */
#define W7 565 /* 2048*sqrt(2)*cos(7*pi/16) */
#ifdef __cplusplus
extern "C"
{
#endif
/* Reduced input IDCT */
void idct_col0(Short *blk);
void idct_col1(Short *blk);
void idct_col2(Short *blk);
void idct_col3(Short *blk);
void idct_col4(Short *blk);
void idct_col0x40(Short *blk);
void idct_col0x20(Short *blk);
void idct_col0x10(Short *blk);
void idct_rowInter(Short *srce, UChar *rec, Int lx);
void idct_row0Inter(Short *blk, UChar *rec, Int lx);
void idct_row1Inter(Short *blk, UChar *rec, Int lx);
void idct_row2Inter(Short *blk, UChar *rec, Int lx);
void idct_row3Inter(Short *blk, UChar *rec, Int lx);
void idct_row4Inter(Short *blk, UChar *rec, Int lx);
void idct_row0x40Inter(Short *blk, UChar *rec, Int lx);
void idct_row0x20Inter(Short *blk, UChar *rec, Int lx);
void idct_row0x10Inter(Short *blk, UChar *rec, Int lx);
void idct_row0xCCInter(Short *blk, UChar *rec, Int lx);
void idct_rowIntra(Short *srce, UChar *rec, Int lx);
void idct_row0Intra(Short *blk, UChar *rec, Int lx);
void idct_row1Intra(Short *blk, UChar *rec, Int lx);
void idct_row2Intra(Short *blk, UChar *rec, Int lx);
void idct_row3Intra(Short *blk, UChar *rec, Int lx);
void idct_row4Intra(Short *blk, UChar *rec, Int lx);
void idct_row0x40Intra(Short *blk, UChar *rec, Int lx);
void idct_row0x20Intra(Short *blk, UChar *rec, Int lx);
void idct_row0x10Intra(Short *blk, UChar *rec, Int lx);
void idct_row0xCCIntra(Short *blk, UChar *rec, Int lx);
void idct_rowzmv(Short *srce, UChar *rec, UChar *prev, Int lx);
void idct_row0zmv(Short *blk, UChar *rec, UChar *prev, Int lx);
void idct_row1zmv(Short *blk, UChar *rec, UChar *prev, Int lx);
void idct_row2zmv(Short *blk, UChar *rec, UChar *prev, Int lx);
void idct_row3zmv(Short *blk, UChar *rec, UChar *prev, Int lx);
void idct_row4zmv(Short *blk, UChar *rec, UChar *prev, Int lx);
void idct_row0x40zmv(Short *blk, UChar *rec, UChar *prev, Int lx);
void idct_row0x20zmv(Short *blk, UChar *rec, UChar *prev, Int lx);
void idct_row0x10zmv(Short *blk, UChar *rec, UChar *prev, Int lx);
void idct_row0xCCzmv(Short *blk, UChar *rec, UChar *prev, Int lx);
#ifdef __cplusplus
}
#endif
/* Look-up table mapping to RIDCT from bitmap */
#ifdef SMALL_DCT
static void (*const idctcolVCA[16])(Short*) =
{
&idct_col0, &idct_col4, &idct_col3, &idct_col4,
&idct_col2, &idct_col4, &idct_col3, &idct_col4,
&idct_col1, &idct_col4, &idct_col3, &idct_col4,
&idct_col2, &idct_col4, &idct_col3, &idct_col4
};
static void (*const idctrowVCAInter[16])(Short*, UChar*, Int) =
{
&idct_row0Inter, &idct_row4Inter, &idct_row3Inter, &idct_row4Inter,
&idct_row2Inter, &idct_row4Inter, &idct_row3Inter, &idct_row4Inter,
&idct_row1Inter, &idct_row4Inter, &idct_row3Inter, &idct_row4Inter,
&idct_row2Inter, &idct_row4Inter, &idct_row3Inter, &idct_row4Inter
};
static void (*const idctrowVCAzmv[16])(Short*, UChar*, UChar*, Int) =
{
&idct_row0zmv, &idct_row4zmv, &idct_row3zmv, &idct_row4zmv,
&idct_row2zmv, &idct_row4zmv, &idct_row3zmv, &idct_row4zmv,
&idct_row1zmv, &idct_row4zmv, &idct_row3zmv, &idct_row4zmv,
&idct_row2zmv, &idct_row4zmv, &idct_row3zmv, &idct_row4zmv
};
static void (*const idctrowVCAIntra[16])(Short*, UChar*, Int) =
{
&idct_row0Intra, &idct_row4Intra, &idct_row3Intra, &idct_row4Intra,
&idct_row2Intra, &idct_row4Intra, &idct_row3Intra, &idct_row4Intra,
&idct_row1Intra, &idct_row4Intra, &idct_row3Intra, &idct_row4Intra,
&idct_row2Intra, &idct_row4Intra, &idct_row3Intra, &idct_row4Intra
};
#else /* SMALL_DCT */
static void (*const idctcolVCA[16])(Short*) =
{
&idct_col0, &idct_col0x10, &idct_col0x20, &idct_col4,
&idct_col0x40, &idct_col4, &idct_col3, &idct_col4,
&idct_col1, &idct_col4, &idct_col3, &idct_col4,
&idct_col2, &idct_col4, &idct_col3, &idct_col4
};
static void (*const idctrowVCAInter[16])(Short*, UChar*, Int) =
{
&idct_row0Inter, &idct_row0x10Inter, &idct_row0x20Inter, &idct_row4Inter,
&idct_row0x40Inter, &idct_row4Inter, &idct_row3Inter, &idct_row4Inter,
&idct_row1Inter, &idct_row4Inter, &idct_row3Inter, &idct_row4Inter,
&idct_row2Inter, &idct_row4Inter, &idct_row3Inter, &idct_row4Inter
};
static void (*const idctrowVCAzmv[16])(Short*, UChar*, UChar*, Int) =
{
&idct_row0zmv, &idct_row0x10zmv, &idct_row0x20zmv, &idct_row4zmv,
&idct_row0x40zmv, &idct_row4zmv, &idct_row3zmv, &idct_row4zmv,
&idct_row1zmv, &idct_row4zmv, &idct_row3zmv, &idct_row4zmv,
&idct_row2zmv, &idct_row4zmv, &idct_row3zmv, &idct_row4zmv
};
static void (*const idctrowVCAIntra[16])(Short*, UChar*, Int) =
{
&idct_row0Intra, &idct_row0x10Intra, &idct_row0x20Intra, &idct_row4Intra,
&idct_row0x40Intra, &idct_row4Intra, &idct_row3Intra, &idct_row4Intra,
&idct_row1Intra, &idct_row4Intra, &idct_row3Intra, &idct_row4Intra,
&idct_row2Intra, &idct_row4Intra, &idct_row3Intra, &idct_row4Intra
};
#endif /* SMALL_DCT */
#ifdef __cplusplus
extern "C"
{
#endif
/* part in AppVCA_dct.c */
//void Block1x1DCTzmv (Short *out,UChar *prev,UChar *cur,UChar *rec,Int lx,Int chroma);
void Block1x1DCTwSub(Short *out, UChar *cur, UChar *prev, Int pitch_chroma);
void Block1x1DCTIntra(Short *out, UChar *cur, UChar *dummy1, Int pitch_chroma);
/* This part is in dct_aan.c */
Void BlockDCT_AANwSub(Short *out, UChar *cur, UChar *prev, Int pitch_chroma);
Void Block4x4DCT_AANwSub(Short *out, UChar *cur, UChar *prev, Int pitch_chroma);
Void Block2x2DCT_AANwSub(Short *out, UChar *cur, UChar *prev, Int pitch_chroma);
//Void BlockDCT_AANzmv(Short *out,UChar *prev,UChar *cur,UChar *rec,Int ColTh,Int lx,Int chroma);
//Void Block4x4DCT_AANzmv(Short *out,UChar *prev,UChar *cur,UChar *rec,Int ColTh,Int lx,Int chroma);
//Void Block2x2DCT_AANzmv(Short *out,UChar *prev,UChar *cur,UChar *rec,Int ColTh,Int lx,Int chroma);
Void BlockDCT_AANIntra(Short *out, UChar *cur, UChar *dummy1, Int pitch_chroma);
Void Block4x4DCT_AANIntra(Short *out, UChar *cur, UChar *dummy1, Int pitch_chroma);
Void Block2x2DCT_AANIntra(Short *out, UChar *cur, UChar *dummy1, Int pitch_chroma);
#ifdef __cplusplus
}
#endif
#endif //_DCT_H_


@@ -0,0 +1,395 @@
/* ------------------------------------------------------------------
* Copyright (C) 1998-2009 PacketVideo
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied.
* See the License for the specific language governing permissions
* and limitations under the License.
* -------------------------------------------------------------------
*/
/* Filename: dct_inline.h */
/* Description: Implementation for in-line functions used in dct.cpp */
/* Modified: */
/*********************************************************************************/
#ifndef _DCT_INLINE_H_
#define _DCT_INLINE_H_
#if !defined(PV_ARM_GCC_V5) && !defined(PV_ARM_GCC_V4)
__inline int32 mla724(int32 op1, int32 op2, int32 op3)
{
int32 out;
OSCL_UNUSED_ARG(op1);
out = op2 * 724 + op3; /* op1 is not used here */
return out;
}
__inline int32 mla392(int32 k0, int32 k14, int32 round)
{
int32 k1;
OSCL_UNUSED_ARG(k14);
k1 = k0 * 392 + round;
return k1;
}
__inline int32 mla554(int32 k4, int32 k12, int32 k1)
{
int32 k0;
OSCL_UNUSED_ARG(k12);
k0 = k4 * 554 + k1;
return k0;
}
__inline int32 mla1338(int32 k6, int32 k14, int32 k1)
{
int32 out;
OSCL_UNUSED_ARG(k14);
out = k6 * 1338 + k1;
return out;
}
__inline int32 mla946(int32 k6, int32 k14, int32 k1)
{
int32 out;
OSCL_UNUSED_ARG(k14);
out = k6 * 946 + k1;
return out;
}
__inline int32 sum_abs(int32 k0, int32 k1, int32 k2, int32 k3,
int32 k4, int32 k5, int32 k6, int32 k7)
{
int32 carry, abs_sum;
carry = k0 >> 31;
abs_sum = (k0 ^ carry);
carry = k1 >> 31;
abs_sum += (k1 ^ carry) - carry;
carry = k2 >> 31;
abs_sum += (k2 ^ carry) - carry;
carry = k3 >> 31;
abs_sum += (k3 ^ carry) - carry;
carry = k4 >> 31;
abs_sum += (k4 ^ carry) - carry;
carry = k5 >> 31;
abs_sum += (k5 ^ carry) - carry;
carry = k6 >> 31;
abs_sum += (k6 ^ carry) - carry;
carry = k7 >> 31;
abs_sum += (k7 ^ carry) - carry;
return abs_sum;
}
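/* Editorial note (not part of the original source): sum_abs() accumulates
 * |k0..k7| using the two's-complement identity |x| = (x ^ (x >> 31)) - (x >> 31).
 * The very first term omits the trailing "- carry", so a negative k0
 * contributes |k0| - 1; the assembly variants below appear to behave the same
 * way, and the off-by-one seems deliberate since the result only feeds the
 * approximate DCT-mode thresholding in dct.cpp.
 */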
#elif defined(__CC_ARM) /* only work with arm v5 */
#if defined(__TARGET_ARCH_5TE)
__inline int32 mla724(int32 op1, int32 op2, int32 op3)
{
int32 out;
__asm
{
smlabb out, op1, op2, op3
}
return out;
}
__inline int32 mla392(int32 k0, int32 k14, int32 round)
{
int32 k1;
__asm
{
smlabt k1, k0, k14, round
}
return k1;
}
__inline int32 mla554(int32 k4, int32 k12, int32 k1)
{
int32 k0;
__asm
{
smlabt k0, k4, k12, k1
}
return k0;
}
__inline int32 mla1338(int32 k6, int32 k14, int32 k1)
{
int32 out;
__asm
{
smlabb out, k6, k14, k1
}
return out;
}
__inline int32 mla946(int32 k6, int32 k14, int32 k1)
{
int32 out;
__asm
{
smlabb out, k6, k14, k1
}
return out;
}
#else // not ARM5TE
__inline int32 mla724(int32 op1, int32 op2, int32 op3)
{
int32 out;
__asm
{
and out, op2, #0xFFFF
mla out, op1, out, op3
}
return out;
}
__inline int32 mla392(int32 k0, int32 k14, int32 round)
{
int32 k1;
__asm
{
mov k1, k14, asr #16
mla k1, k0, k1, round
}
return k1;
}
__inline int32 mla554(int32 k4, int32 k12, int32 k1)
{
int32 k0;
__asm
{
mov k0, k12, asr #16
mla k0, k4, k0, k1
}
return k0;
}
__inline int32 mla1338(int32 k6, int32 k14, int32 k1)
{
int32 out;
__asm
{
and out, k14, 0xFFFF
mla out, k6, out, k1
}
return out;
}
__inline int32 mla946(int32 k6, int32 k14, int32 k1)
{
int32 out;
__asm
{
and out, k14, 0xFFFF
mla out, k6, out, k1
}
return out;
}
#endif
__inline int32 sum_abs(int32 k0, int32 k1, int32 k2, int32 k3,
int32 k4, int32 k5, int32 k6, int32 k7)
{
int32 carry, abs_sum;
__asm
{
eor carry, k0, k0, asr #31 ;
eors abs_sum, k1, k1, asr #31 ;
adc abs_sum, abs_sum, carry ;
eors carry, k2, k2, asr #31 ;
adc abs_sum, abs_sum, carry ;
eors carry, k3, k3, asr #31 ;
adc abs_sum, abs_sum, carry ;
eors carry, k4, k4, asr #31 ;
adc abs_sum, abs_sum, carry ;
eors carry, k5, k5, asr #31 ;
adc abs_sum, abs_sum, carry ;
eors carry, k6, k6, asr #31 ;
adc abs_sum, abs_sum, carry ;
eors carry, k7, k7, asr #31 ;
adc abs_sum, abs_sum, carry ;
}
return abs_sum;
}
#elif ( defined(PV_ARM_GCC_V5) || defined(PV_ARM_GCC_V4) ) /* ARM GNU COMPILER */
__inline int32 mla724(int32 op1, int32 op2, int32 op3)
{
register int32 out;
register int32 aa = (int32)op1;
register int32 bb = (int32)op2;
register int32 cc = (int32)op3;
asm volatile("smlabb %0, %1, %2, %3"
: "=&r"(out)
: "r"(aa),
"r"(bb),
"r"(cc));
return out;
}
__inline int32 mla392(int32 k0, int32 k14, int32 round)
{
register int32 out;
register int32 aa = (int32)k0;
register int32 bb = (int32)k14;
register int32 cc = (int32)round;
asm volatile("smlabt %0, %1, %2, %3"
: "=&r"(out)
: "r"(aa),
"r"(bb),
"r"(cc));
return out;
}
__inline int32 mla554(int32 k4, int32 k12, int32 k1)
{
register int32 out;
register int32 aa = (int32)k4;
register int32 bb = (int32)k12;
register int32 cc = (int32)k1;
asm volatile("smlabt %0, %1, %2, %3"
: "=&r"(out)
: "r"(aa),
"r"(bb),
"r"(cc));
return out;
}
__inline int32 mla1338(int32 k6, int32 k14, int32 k1)
{
register int32 out;
register int32 aa = (int32)k6;
register int32 bb = (int32)k14;
register int32 cc = (int32)k1;
asm volatile("smlabb %0, %1, %2, %3"
: "=&r"(out)
: "r"(aa),
"r"(bb),
"r"(cc));
return out;
}
__inline int32 mla946(int32 k6, int32 k14, int32 k1)
{
register int32 out;
register int32 aa = (int32)k6;
register int32 bb = (int32)k14;
register int32 cc = (int32)k1;
asm volatile("smlabb %0, %1, %2, %3"
: "=&r"(out)
: "r"(aa),
"r"(bb),
"r"(cc));
return out;
}
__inline int32 sum_abs(int32 k0, int32 k1, int32 k2, int32 k3,
int32 k4, int32 k5, int32 k6, int32 k7)
{
register int32 carry;
register int32 abs_sum;
register int32 aa = (int32)k0;
register int32 bb = (int32)k1;
register int32 cc = (int32)k2;
register int32 dd = (int32)k3;
register int32 ee = (int32)k4;
register int32 ff = (int32)k5;
register int32 gg = (int32)k6;
register int32 hh = (int32)k7;
asm volatile("eor %0, %2, %2, asr #31\n\t"
"eors %1, %3, %3, asr #31\n\t"
"adc %1, %1, %0\n\t"
"eors %0, %4, %4, asr #31\n\t"
"adc %1, %1, %0\n\t"
"eors %0, %5, %5, asr #31\n\t"
"adc %1, %1, %0\n\t"
"eors %0, %6, %6, asr #31\n\t"
"adc %1, %1, %0\n\t"
"eors %0, %7, %7, asr #31\n\t"
"adc %1, %1, %0\n\t"
"eors %0, %8, %8, asr #31\n\t"
"adc %1, %1, %0\n\t"
"eors %0, %9, %9, asr #31\n\t"
"adc %1, %1, %0\n\t"
: "=&r"(carry),
"=&r"(abs_sum):
"r"(aa),
"r"(bb),
"r"(cc),
"r"(dd),
"r"(ee),
"r"(ff),
"r"(gg),
"r"(hh));
return abs_sum;
}
#endif // Diff. OS
#endif //_DCT_INLINE_H_


@@ -0,0 +1,622 @@
/* ------------------------------------------------------------------
* Copyright (C) 1998-2009 PacketVideo
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied.
* See the License for the specific language governing permissions
* and limitations under the License.
* -------------------------------------------------------------------
*/
#include "mp4def.h"
#include "mp4lib_int.h"
#include "mp4enc_lib.h"
#include "dct.h"
#include "m4venc_oscl.h"
/* ======================================================================== */
/* Function : CodeMB_H263( ) */
/* Date : 8/15/2001 */
/* Purpose : Perform residue calc (only zero MV), DCT, H263 Quant/Dequant,*/
/* IDCT and motion compensation.Modified from FastCodeMB() */
/* Input : */
/* video Video encoder data structure */
/* function Approximate DCT function, scaling and threshold */
/* ncoefblck Array for last nonzero coeff for speedup in VlcEncode */
/* QP Combined offset from the origin to the current */
/* macroblock and QP for current MB. */
/* Output : */
/* video->outputMB Quantized DCT coefficients. */
/* currVop->yChan,uChan,vChan Reconstructed pixels */
/* */
/* Return : PV_STATUS */
/* Modified : */
/* 2/26/01
-modified threshold based on correlation coeff 0.75 only for mode H.263
-ncoefblck[] as input, to keep position of last non-zero coeff*/
/* 8/10/01
-modified threshold based on correlation coeff 0.5
-used column threshold to speedup column DCT.
-used bitmap zigzag to speedup RunLevel(). */
/* ======================================================================== */
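/* Editorial note (summary of the code below, not part of the original source):
 * the per-block SAD drives an approximate-DCT decision:
 *   sad < DctTh1   -> block skipped (all coefficients zero)
 *   sad < 18*QP    -> 1x1 (DC-only) DCT
 *   sad < 22*QP    -> 2x2 DCT
 *   sad < 32*QP    -> 4x4 DCT
 *   otherwise      -> full 8x8 DCT
 * with DctTh1 = 16*QP for inter blocks and 3*dc_scaler for intra blocks;
 * shortHeader intra blocks are never skipped because their DC cannot be zero.
 */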
PV_STATUS CodeMB_H263(VideoEncData *video, approxDCT *function, Int QP, Int ncoefblck[])
{
Int sad, k, CBP, mbnum = video->mbnum;
Short *output, *dataBlock;
UChar Mode = video->headerInfo.Mode[mbnum];
UChar *bitmapcol, *bitmaprow = video->bitmaprow;
UInt *bitmapzz ;
UChar shortHeader = video->vol[video->currLayer]->shortVideoHeader;
Int dc_scaler = 8;
Int intra = (Mode == MODE_INTRA || Mode == MODE_INTRA_Q);
struct QPstruct QuantParam;
Int dctMode, DctTh1;
Int ColTh;
Int(*BlockQuantDequantH263)(Short *, Short *, struct QPstruct *,
UChar[], UChar *, UInt *, Int, Int, Int, UChar);
Int(*BlockQuantDequantH263DC)(Short *, Short *, struct QPstruct *,
UChar *, UInt *, Int, UChar);
void (*BlockDCT1x1)(Short *, UChar *, UChar *, Int);
void (*BlockDCT2x2)(Short *, UChar *, UChar *, Int);
void (*BlockDCT4x4)(Short *, UChar *, UChar *, Int);
void (*BlockDCT8x8)(Short *, UChar *, UChar *, Int);
/* motion comp. related var. */
Vop *currVop = video->currVop;
VideoEncFrameIO *inputFrame = video->input;
Int ind_x = video->outputMB->mb_x;
Int ind_y = video->outputMB->mb_y;
Int lx = currVop->pitch;
Int width = currVop->width;
UChar *rec, *input, *pred;
Int offset = QP >> 5; /* QP is combined offset and QP */
Int offsetc = (offset >> 2) + (ind_x << 2); /* offset for chrom */
/*****************************/
OSCL_UNUSED_ARG(function);
output = video->outputMB->block[0];
CBP = 0;
QP = QP & 0x1F;
// M4VENC_MEMSET(output,0,(sizeof(Short)<<6)*6); /* reset quantized coeff. to zero , 7/24/01*/
QuantParam.QPx2 = QP << 1;
QuantParam.QP = QP;
QuantParam.QPdiv2 = QP >> 1;
QuantParam.QPx2plus = QuantParam.QPx2 + QuantParam.QPdiv2;
QuantParam.Addition = QP - 1 + (QP & 0x1);
if (intra)
{
BlockDCT1x1 = &Block1x1DCTIntra;
BlockDCT2x2 = &Block2x2DCT_AANIntra;
BlockDCT4x4 = &Block4x4DCT_AANIntra;
BlockDCT8x8 = &BlockDCT_AANIntra;
BlockQuantDequantH263 = &BlockQuantDequantH263Intra;
BlockQuantDequantH263DC = &BlockQuantDequantH263DCIntra;
if (shortHeader)
{
dc_scaler = 8;
}
else
{
dc_scaler = cal_dc_scalerENC(QP, 1); /* luminance blocks */
}
DctTh1 = (Int)(dc_scaler * 3);//*1.829
ColTh = ColThIntra[QP];
}
else
{
BlockDCT1x1 = &Block1x1DCTwSub;
BlockDCT2x2 = &Block2x2DCT_AANwSub;
BlockDCT4x4 = &Block4x4DCT_AANwSub;
BlockDCT8x8 = &BlockDCT_AANwSub;
BlockQuantDequantH263 = &BlockQuantDequantH263Inter;
BlockQuantDequantH263DC = &BlockQuantDequantH263DCInter;
ColTh = ColThInter[QP];
DctTh1 = (Int)(16 * QP); //9*QP;
}
rec = currVop->yChan + offset;
input = inputFrame->yChan + offset;
if (lx != width) input -= (ind_y << 9); /* non-padded offset */
dataBlock = video->dataBlock;
pred = video->predictedMB;
for (k = 0; k < 6; k++)
{
CBP <<= 1;
bitmapcol = video->bitmapcol[k];
bitmapzz = video->bitmapzz[k]; /* 7/30/01 */
if (k < 4)
{
sad = video->mot[mbnum][k+1].sad;
if (k&1)
{
rec += 8;
input += 8;
}
else if (k == 2)
{
dctMode = ((width << 3) - 8);
input += dctMode;
dctMode = ((lx << 3) - 8);
rec += dctMode;
}
}
else
{
if (k == 4)
{
rec = currVop->uChan + offsetc;
input = inputFrame->uChan + offsetc;
if (lx != width) input -= (ind_y << 7);
lx >>= 1;
width >>= 1;
if (intra)
{
sad = getBlockSum(input, width);
if (shortHeader)
dc_scaler = 8;
else
{
dc_scaler = cal_dc_scalerENC(QP, 2); /* chrominance blocks */
}
DctTh1 = (Int)(dc_scaler * 3);//*1.829
}
else
sad = Sad8x8(input, pred, width);
}
else
{
rec = currVop->vChan + offsetc;
input = inputFrame->vChan + offsetc;
if (lx != width) input -= (ind_y << 7);
if (intra)
{
sad = getBlockSum(input, width);
}
else
sad = Sad8x8(input, pred, width);
}
}
if (sad < DctTh1 && !(shortHeader && intra)) /* all-zero */
{ /* For shortHeader intra block, DC value cannot be zero */
dctMode = 0;
CBP |= 0;
ncoefblck[k] = 0;
}
else if (sad < 18*QP/*(QP<<4)*/) /* DC-only */
{
dctMode = 1;
BlockDCT1x1(dataBlock, input, pred, width);
CBP |= (*BlockQuantDequantH263DC)(dataBlock, output, &QuantParam,
bitmaprow + k, bitmapzz, dc_scaler, shortHeader);
ncoefblck[k] = 1;
}
else
{
dataBlock[64] = ColTh;
if (sad < 22*QP/*(QP<<4)+(QP<<1)*/) /* 2x2 DCT */
{
dctMode = 2;
BlockDCT2x2(dataBlock, input, pred, width);
ncoefblck[k] = 6;
}
else if (sad < (QP << 5)) /* 4x4 DCT */
{
dctMode = 4;
BlockDCT4x4(dataBlock, input, pred, width);
ncoefblck[k] = 26;
}
else /* Full-DCT */
{
dctMode = 8;
BlockDCT8x8(dataBlock, input, pred, width);
ncoefblck[k] = 64;
}
CBP |= (*BlockQuantDequantH263)(dataBlock, output, &QuantParam,
bitmapcol, bitmaprow + k, bitmapzz, dctMode, k, dc_scaler, shortHeader);
}
BlockIDCTMotionComp(dataBlock, bitmapcol, bitmaprow[k], dctMode, rec, pred, (lx << 1) | intra);
output += 64;
if (!(k&1))
{
pred += 8;
}
else
{
pred += 120;
}
}
video->headerInfo.CBP[mbnum] = CBP; /* 5/18/2001 */
return PV_SUCCESS;
}
#ifndef NO_MPEG_QUANT
/* ======================================================================== */
/* Function : CodeMB_MPEG( ) */
/* Date : 8/15/2001 */
/* Purpose : Perform residue calc (only zero MV), DCT, MPEG Quant/Dequant,*/
/* IDCT and motion compensation.Modified from FastCodeMB() */
/* Input : */
/* video Video encoder data structure */
/* function Approximate DCT function, scaling and threshold */
/* ncoefblck Array for last nonzero coeff for speedup in VlcEncode */
/* QP Combined offset from the origin to the current */
/* macroblock and QP for current MB. */
/* Output : */
/* video->outputMB Quantized DCT coefficients. */
/* currVop->yChan,uChan,vChan Reconstructed pixels */
/* */
/* Return : PV_STATUS */
/* Modified : */
/* 2/26/01
-modified threshold based on correlation coeff 0.75 only for mode H.263
-ncoefblck[] as input, keep position of last non-zero coeff*/
/* 8/10/01
-modified threshold based on correlation coeff 0.5
-used column threshold to speedup column DCT.
-used bitmap zigzag to speedup RunLevel(). */
/* ======================================================================== */
PV_STATUS CodeMB_MPEG(VideoEncData *video, approxDCT *function, Int QP, Int ncoefblck[])
{
Int sad, k, CBP, mbnum = video->mbnum;
Short *output, *dataBlock;
UChar Mode = video->headerInfo.Mode[mbnum];
UChar *bitmapcol, *bitmaprow = video->bitmaprow;
UInt *bitmapzz ;
Int dc_scaler = 8;
Vol *currVol = video->vol[video->currLayer];
Int intra = (Mode == MODE_INTRA || Mode == MODE_INTRA_Q);
Int *qmat;
Int dctMode, DctTh1, DctTh2, DctTh3, DctTh4;
Int ColTh;
Int(*BlockQuantDequantMPEG)(Short *, Short *, Int, Int *,
UChar [], UChar *, UInt *, Int, Int, Int);
Int(*BlockQuantDequantMPEGDC)(Short *, Short *, Int, Int *,
UChar [], UChar *, UInt *, Int);
void (*BlockDCT1x1)(Short *, UChar *, UChar *, Int);
void (*BlockDCT2x2)(Short *, UChar *, UChar *, Int);
void (*BlockDCT4x4)(Short *, UChar *, UChar *, Int);
void (*BlockDCT8x8)(Short *, UChar *, UChar *, Int);
/* motion comp. related var. */
Vop *currVop = video->currVop;
VideoEncFrameIO *inputFrame = video->input;
Int ind_x = video->outputMB->mb_x;
Int ind_y = video->outputMB->mb_y;
Int lx = currVop->pitch;
Int width = currVop->width;
UChar *rec, *input, *pred;
Int offset = QP >> 5;
Int offsetc = (offset >> 2) + (ind_x << 2); /* offset for chrom */
/*****************************/
OSCL_UNUSED_ARG(function);
output = video->outputMB->block[0];
CBP = 0;
QP = QP & 0x1F;
// M4VENC_MEMSET(output,0,(sizeof(Short)<<6)*6); /* reset quantized coeff. to zero , 7/24/01*/
if (intra)
{
BlockDCT1x1 = &Block1x1DCTIntra;
BlockDCT2x2 = &Block2x2DCT_AANIntra;
BlockDCT4x4 = &Block4x4DCT_AANIntra;
BlockDCT8x8 = &BlockDCT_AANIntra;
BlockQuantDequantMPEG = &BlockQuantDequantMPEGIntra;
BlockQuantDequantMPEGDC = &BlockQuantDequantMPEGDCIntra;
dc_scaler = cal_dc_scalerENC(QP, 1); /* luminance blocks */
qmat = currVol->iqmat;
DctTh1 = (Int)(3 * dc_scaler);//2*dc_scaler);
DctTh2 = (Int)((1.25 * QP - 1) * qmat[1] * 0.45);//0.567);//0.567);
DctTh3 = (Int)((1.25 * QP - 1) * qmat[2] * 0.55);//1.162); /* 8/2/2001 */
DctTh4 = (Int)((1.25 * QP - 1) * qmat[32] * 0.8);//1.7583);//0.7942);
ColTh = ColThIntra[QP];
}
else
{
BlockDCT1x1 = &Block1x1DCTwSub;
BlockDCT2x2 = &Block2x2DCT_AANwSub;
BlockDCT4x4 = &Block4x4DCT_AANwSub;
BlockDCT8x8 = &BlockDCT_AANwSub;
BlockQuantDequantMPEG = &BlockQuantDequantMPEGInter;
BlockQuantDequantMPEGDC = &BlockQuantDequantMPEGDCInter;
qmat = currVol->niqmat;
DctTh1 = (Int)(((QP << 1) - 0.5) * qmat[0] * 0.4);//0.2286);//0.3062);
DctTh2 = (Int)(((QP << 1) - 0.5) * qmat[1] * 0.45);//0.567);//0.4);
DctTh3 = (Int)(((QP << 1) - 0.5) * qmat[2] * 0.55);//1.162); /* 8/2/2001 */
DctTh4 = (Int)(((QP << 1) - 0.5) * qmat[32] * 0.8);//1.7583);//0.7942);
ColTh = ColThInter[QP];
}// get qmat, DctTh1, DctTh2, DctTh3
rec = currVop->yChan + offset;
input = inputFrame->yChan + offset;
if (lx != width) input -= (ind_y << 9); /* non-padded offset */
dataBlock = video->dataBlock;
pred = video->predictedMB;
for (k = 0; k < 6; k++)
{
CBP <<= 1;
bitmapcol = video->bitmapcol[k];
bitmapzz = video->bitmapzz[k]; /* 8/2/01 */
if (k < 4)
{//Y block
sad = video->mot[mbnum][k+1].sad;
if (k&1)
{
rec += 8;
input += 8;
}
else if (k == 2)
{
dctMode = ((width << 3) - 8);
input += dctMode;
dctMode = ((lx << 3) - 8);
rec += dctMode;
}
}
else
{// U, V block
if (k == 4)
{
rec = currVop->uChan + offsetc;
input = inputFrame->uChan + offsetc;
if (lx != width) input -= (ind_y << 7);
lx >>= 1;
width >>= 1;
if (intra)
{
dc_scaler = cal_dc_scalerENC(QP, 2); /* chrominance blocks */
DctTh1 = dc_scaler * 3;
sad = getBlockSum(input, width);
}
else
sad = Sad8x8(input, pred, width);
}
else
{
rec = currVop->vChan + offsetc;
input = inputFrame->vChan + offsetc;
if (lx != width) input -= (ind_y << 7);
if (intra)
sad = getBlockSum(input, width);
else
sad = Sad8x8(input, pred, width);
}
}
if (sad < DctTh1) /* all-zero */
{
dctMode = 0;
CBP |= 0;
ncoefblck[k] = 0;
}
else if (sad < DctTh2) /* DC-only */
{
dctMode = 1;
BlockDCT1x1(dataBlock, input, pred, width);
CBP |= (*BlockQuantDequantMPEGDC)(dataBlock, output, QP, qmat,
bitmapcol, bitmaprow + k, bitmapzz, dc_scaler);
ncoefblck[k] = 1;
}
else
{
dataBlock[64] = ColTh;
if (sad < DctTh3) /* 2x2-DCT */
{
dctMode = 2;
BlockDCT2x2(dataBlock, input, pred, width);
ncoefblck[k] = 6;
}
else if (sad < DctTh4) /* 4x4 DCT */
{
dctMode = 4;
BlockDCT4x4(dataBlock, input, pred, width);
ncoefblck[k] = 26;
}
else /* full-DCT */
{
dctMode = 8;
BlockDCT8x8(dataBlock, input, pred, width);
ncoefblck[k] = 64;
}
CBP |= (*BlockQuantDequantMPEG)(dataBlock, output, QP, qmat,
bitmapcol, bitmaprow + k, bitmapzz, dctMode, k, dc_scaler); //
}
dctMode = 8; /* for mismatch handle */
BlockIDCTMotionComp(dataBlock, bitmapcol, bitmaprow[k], dctMode, rec, pred, (lx << 1) | (intra));
output += 64;
if (!(k&1))
{
pred += 8;
}
else
{
pred += 120;
}
}
video->headerInfo.CBP[mbnum] = CBP; /* 5/18/2001 */
return PV_SUCCESS;
}
#endif
/* ======================================================================== */
/* Function : getBlockSAV( ) */
/* Date : 8/10/2000 */
/* Purpose : Get SAV for one block */
/* In/out : block[64] contain one block data */
/* Return : */
/* Modified : */
/* ======================================================================== */
/* can be written in MMX or SSE, 2/22/2001 */
Int getBlockSAV(Short block[])
{
Int i, val, sav = 0;
i = 8;
while (i--)
{
val = *block++;
if (val > 0) sav += val;
else sav -= val;
val = *block++;
if (val > 0) sav += val;
else sav -= val;
val = *block++;
if (val > 0) sav += val;
else sav -= val;
val = *block++;
if (val > 0) sav += val;
else sav -= val;
val = *block++;
if (val > 0) sav += val;
else sav -= val;
val = *block++;
if (val > 0) sav += val;
else sav -= val;
val = *block++;
if (val > 0) sav += val;
else sav -= val;
val = *block++;
if (val > 0) sav += val;
else sav -= val;
}
return sav;
}
/* ======================================================================== */
/* Function : Sad8x8( ) */
/* Date : 8/10/2000 */
/* Purpose : Find SAD between prev block and current block */
/* In/out : Previous and current frame block pointers, and frame width */
/* Return : */
/* Modified : */
/* 8/15/01, - do 4 pixel at a time assuming 32 bit register */
/* ======================================================================== */
Int Sad8x8(UChar *cur, UChar *prev, Int width)
{
UChar *end = cur + (width << 3);
Int sad = 0;
Int *curInt = (Int*) cur;
Int *prevInt = (Int*) prev;
Int cur1, cur2, prev1, prev2;
UInt mask, sgn_msk = 0x80808080;
Int sum2 = 0, sum4 = 0;
Int tmp;
do
{
mask = ~(0xFF00);
cur1 = curInt[1]; /* load cur[4..7] */
cur2 = curInt[0];
curInt += (width >> 2); /* load cur[0..3] and +=lx */
prev1 = prevInt[1];
prev2 = prevInt[0];
prevInt += 4;
tmp = prev2 ^ cur2;
cur2 = prev2 - cur2;
tmp = tmp ^ cur2; /* (^)^(-) last bit is one if carry */
tmp = sgn_msk & ((UInt)tmp >> 1); /* check the sign of each byte */
if (cur2 < 0) tmp = tmp | 0x80000000; /* correct sign of first byte */
tmp = (tmp << 8) - tmp; /* carry borrowed bytes are marked with 0x1FE */
cur2 = cur2 + (tmp >> 7); /* negative bytes get 0xFF (-1) added */
cur2 = cur2 ^(tmp >> 7); /* take absolute by inverting bits (EOR) */
tmp = prev1 ^ cur1;
cur1 = prev1 - cur1;
tmp = tmp ^ cur1; /* (^)^(-) last bit is one if carry */
tmp = sgn_msk & ((UInt)tmp >> 1); /* check the sign of each byte */
if (cur1 < 0) tmp = tmp | 0x80000000; /* correct sign of first byte */
tmp = (tmp << 8) - tmp; /* carry borrowed bytes are marked with 0x1FE */
cur1 = cur1 + (tmp >> 7); /* negative bytes get 0xFF (-1) added */
cur1 = cur1 ^(tmp >> 7); /* take absolute by inverting bits (EOR) */
sum4 = sum4 + cur1;
cur1 = cur1 & (mask << 8); /* mask first and third bytes */
sum2 = sum2 + ((UInt)cur1 >> 8);
sum4 = sum4 + cur2;
cur2 = cur2 & (mask << 8); /* mask first and third bytes */
sum2 = sum2 + ((UInt)cur2 >> 8);
}
while ((UInt)curInt < (UInt)end);
cur1 = sum4 - (sum2 << 8); /* get even-sum */
cur1 = cur1 + sum2; /* add 16 bit even-sum and odd-sum*/
cur1 = cur1 + (cur1 << 16); /* add upper and lower 16 bit sum */
sad = ((UInt)cur1 >> 16); /* take upper 16 bit */
return sad;
}
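#if 0   /* Editorial sketch, not part of the original encoder: a plain scalar
           reference for the packed 4-pixels-at-a-time SAD above, shown only to
           document what the register-packed arithmetic computes. */
static Int Sad8x8_ref(UChar *cur, UChar *prev, Int width)
{
    Int row, col, diff, sad = 0;
    for (row = 0; row < 8; row++)
    {
        for (col = 0; col < 8; col++)
        {
            diff = cur[col] - prev[col];
            sad += (diff >= 0) ? diff : -diff;
        }
        cur += width;   /* cur walks the input frame, pitch = width */
        prev += 16;     /* prev walks the 16x16 predicted MB (pitch 16),
                           matching the prevInt += 4 step above */
    }
    return sad;
}
#endif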
/* ======================================================================== */
/* Function : getBlockSum( ) */
/* Date : 8/10/2000 */
/* Purpose : Find summation of value within a block. */
/* In/out : Pointer to current block in a frame and frame width */
/* Return : */
/* Modified : */
/* 8/15/01, - SIMD 4 pixels at a time */
/* ======================================================================== */
Int getBlockSum(UChar *cur, Int width)
{
Int sad = 0, sum4 = 0, sum2 = 0;
UChar *end = cur + (width << 3);
Int *curInt = (Int*)cur;
UInt mask = ~(0xFF00);
Int load1, load2;
do
{
load1 = curInt[1];
load2 = curInt[0];
curInt += (width >> 2);
sum4 += load1;
load1 = load1 & (mask << 8); /* even bytes */
sum2 += ((UInt)load1 >> 8); /* sum even bytes, 16 bit */
sum4 += load2;
load2 = load2 & (mask << 8); /* even bytes */
sum2 += ((UInt)load2 >> 8); /* sum even bytes, 16 bit */
}
while ((UInt)curInt < (UInt)end);
load1 = sum4 - (sum2 << 8); /* get even-sum */
load1 = load1 + sum2; /* add 16 bit even-sum and odd-sum*/
load1 = load1 + (load1 << 16); /* add upper and lower 16 bit sum */
sad = ((UInt)load1 >> 16); /* take upper 16 bit */
return sad;
}
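/* Editorial note (not part of the original source): in both Sad8x8() and
 * getBlockSum() the 32-bit accumulators carry four byte lanes at once.
 * sum2 collects only the high byte of each 16-bit half, so
 * sum4 - (sum2 << 8) recovers the low-byte sums, and the final add/shift
 * pair folds the two 16-bit halves into a single total.
 */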


@@ -0,0 +1,103 @@
/* ------------------------------------------------------------------
* Copyright (C) 1998-2009 PacketVideo
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied.
* See the License for the specific language governing permissions
* and limitations under the License.
* -------------------------------------------------------------------
*/
/* ===================================================================== */
/* File: FastCodeMB.h */
/* Description: This file contains structure and function prototypes used
in the FastCodeMB() function. When it is decided to use FastCodeMB
instead of CodeMB, all of these prototypes should be migrated to
mp4enc_lib.h. */
/* Rev: */
/* Created: 8/14/01 */
/* //////////////////////////////////////////////////////////////////////// */
typedef struct struct_approxDCT approxDCT;
struct struct_approxDCT
{
const Int *scale;
Int(*DCT)(Int block[ ], Int coeff[ ], approxDCT *);
// Threshold value for H.263 Quantizer
Int th_app_all[8];
Int th_app_odd[8];
Int th_app_even[8];
Int th_app_even1[8];
Int th_app_even2[8];
};
struct QPstruct
{
Int QPx2 ;
Int QP;
Int QPdiv2;
Int QPx2plus;
Int Addition;
};
/*---- FastCodeMB.c -----*/
void initCodeMB(approxDCT *function, Int QP);
PV_STATUS CodeMB_H263(VideoEncData *video, approxDCT *function, Int QP, Int ncoefblck[], Int offset);
PV_STATUS CodeMB_MPEG(VideoEncData *video, approxDCT *function, Int QP, Int ncoefblck[], Int offset);
Int getBlockSAV(Int block[]);
Int Sad8x8(UChar *rec, UChar *prev, Int lx);
Int getBlockSum(UChar *rec, Int lx);
/*---- AppVCA_dct.c -----*/
Int AppVCA1_dct(Int block[], Int out[ ], approxDCT *function);
Int AppVCA2_dct(Int block[], Int out[ ], approxDCT *function);
Int AppVCA3_dct(Int block[], Int out[ ], approxDCT *function);
Int AppVCA4_dct(Int block[], Int out[ ], approxDCT *function);
Int AppVCA5_dct(Int block[], Int out[ ], approxDCT *function);
/*---- FastQuant.c -----*/
Int cal_dc_scalerENC(Int QP, Int type) ;
Int BlockQuantDequantH263Inter(Int *rcoeff, Int *qcoeff, struct QPstruct *QuantParam,
UChar bitmapcol[ ], UChar *bitmaprow, UInt *bitmapzz,
Int dctMode, Int comp, Int dummy);
Int BlockQuantDequantH263Intra(Int *rcoeff, Int *qcoeff, struct QPstruct *QuantParam,
UChar bitmapcol[ ], UChar *bitmaprow, UInt *bitmapzz,
Int dctMode, Int comp, Int dc_scaler);
Int BlockQuantDequantH263DCInter(Int *rcoeff, Int *qcoeff, struct QPstruct *QuantParam,
UChar *bitmaprow, UInt *bitmapzz, Int dummy);
Int BlockQuantDequantH263DCIntra(Int *rcoeff, Int *qcoeff, struct QPstruct *QuantParam,
UChar *bitmaprow, UInt *bitmapzz, Int dc_scaler);
Int BlockQuantDequantMPEGInter(Int *rcoeff, Int *qcoeff, Int QP, Int *qmat,
UChar bitmapcol[ ], UChar *bitmaprow, UInt *bitmapzz,
Int DctMode, Int comp, Int dc_scaler);
Int BlockQuantDequantMPEGIntra(Int *rcoeff, Int *qcoeff, Int QP, Int *qmat,
UChar bitmapcol[ ], UChar *bitmaprow, UInt *bitmapzz,
Int DctMode, Int comp, Int dc_scaler);
Int BlockQuantDequantMPEGDCInter(Int *rcoeff, Int *qcoeff, Int QP, Int *qmat,
UChar bitmapcol[ ], UChar *bitmaprow, UInt *bitmapzz, Int dummy);
Int BlockQuantDequantMPEGDCIntra(Int *rcoeff, Int *qcoeff, Int QP, Int *qmat,
UChar bitmapcol[ ], UChar *bitmaprow, UInt *bitmapzz, Int dc_scaler);
/*---- FastIDCT.c -----*/
void BlockIDCTMotionComp(Int *block, UChar *bitmapcol, UChar bitmaprow,
Int dctMode, UChar *rec, Int lx, Int intra);
/*---- motion_comp.c -----*/
void PutSkippedBlock(UChar *rec, UChar *prev, Int lx);

File diff suppressed because it is too large


@@ -0,0 +1,967 @@
/* ------------------------------------------------------------------
* Copyright (C) 1998-2009 PacketVideo
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied.
* See the License for the specific language governing permissions
* and limitations under the License.
* -------------------------------------------------------------------
*/
#include "mp4enc_lib.h"
#include "fastquant_inline.h"
#define siz 63
#define LSL 18
const static UChar imask[8] = {128, 64, 32, 16, 8, 4, 2, 1};
#define SIGN0(a) ( ((a)<0) ? -1 : (((a)>0) ? 1 : 0) )
/* variable bit precision quantization scale */
/* used to avoid using 32-bit multiplication */
const static Short scaleArrayV[32] = {0, 16384, 8192, 5462, /* 15 */
4096, 3277, 2731, 2341,
4096, 3641, 3277, 2979, /* 16 */
2731, 2521, 2341, 2185,
4096, 3856, 3641, 3450, /* 17 */
3277, 3121, 2979, 2850,
5462, 5243, 5042, 4855, /* 18 */
4682, 4520, 4370, 4229
};
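/* Editorial note (illustration, not part of the original source): the table
 * turns division by (2*QP) into a 16-bit multiply plus shift.  With
 * shift = 15 + (2*QP >> 4), scaleArrayV[QP] is roughly (1 << shift)/(2*QP):
 * e.g. QP = 8 gives 4096/(1 << 16) = 1/16 and QP = 31 gives
 * 4229/(1 << 18) ~ 1/62, so coeff_quant() computes
 * (coeff * scaleArrayV[QP]) >> shift ~ coeff/(2*QP).
 */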
/* scale for dc_scaler and qmat, note, no value smaller than 8 */
const static Short scaleArrayV2[47] = {0, 0, 0, 0, 0, 0, 0, 0, /* 15 */
4096, 3641, 3277, 2979, 2731, 2521, 2341, 2185,
4096, 3856, 3641, 3450, 3277, 3121, 2979, 2850, /* 16 */
2731, 2622, 2521, 2428, 2341, 2260, 2185, 2115,
4096, 3972, 3856, 3745, 3641, 3543, 3450, 3361, /* 17 */
3277, 3197, 3121, 3049, 2979, 2913, 2850
};
/* AAN scale and zigzag */
const static Short AANScale[64] =
{
/* 0 */ 0x1000, 0x0B89, 0x0C3E, 0x0D9B, 0x1000, 0x0A2E, 0x0EC8, 0x0E7F,
/* 1 */ 0x0B89, 0x0851, 0x08D4, 0x09CF, 0x0B89, 0x0757, 0x0AA8, 0x0A73,
/* 2 */ 0x0C3E, 0x08D4, 0x095F, 0x0A6A, 0x0C3E, 0x07CB, 0x0B50, 0x0B18,
/* 3 */ 0x0D9B, 0x09CF, 0x0A6A, 0x0B92, 0x0D9B, 0x08A8, 0x0C92, 0x0C54,
/* 4 */ 0x1000, 0x0B89, 0x0C3E, 0x0D9B, 0x1000, 0x0A2E, 0x0EC8, 0x0E7F,
/* 5 */ 0x0A2E, 0x0757, 0x07CB, 0x08A8, 0x0A2E, 0x067A, 0x0968, 0x0939,
/* 6 */ 0x0EC8, 0x0AA8, 0x0B50, 0x0C92, 0x0EC8, 0x0968, 0x0DA8, 0x0D64,
/* 7 */ 0x0E7F, 0x0A73, 0x0B18, 0x0C54, 0x0E7F, 0x0939, 0x0D64, 0x0D23
};
const static UShort ZZTab[64] =
{
/* 0 */ 0x0, 0x2, 0xA, 0xC, 0x1C, 0x1E, 0x36, 0x38,
/* 1 */ 0x4, 0x8, 0xE, 0x1A, 0x20, 0x34, 0x3A, 0x54,
/* 2 */ 0x6, 0x10, 0x18, 0x22, 0x32, 0x3C, 0x52, 0x56,
/* 3 */ 0x12, 0x16, 0x24, 0x30, 0x3E, 0x50, 0x58, 0x6A,
/* 4 */ 0x14, 0x26, 0x2E, 0x40, 0x4E, 0x5A, 0x68, 0x6C,
/* 5 */ 0x28, 0x2C, 0x42, 0x4C, 0x5C, 0x66, 0x6E, 0x78,
/* 6 */ 0x2A, 0x44, 0x4A, 0x5E, 0x64, 0x70, 0x76, 0x7A,
/* 7 */ 0x46, 0x48, 0x60, 0x62, 0x72, 0x74, 0x7C, 0x7E
};
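/* Editorial note (not part of the original source): each ZZTab entry is twice
 * the zig-zag scan position of the corresponding coefficient; the quantizers
 * read zz = ZZTab[i] and store to qcoeff[zz >> 1], presumably so the table can
 * also serve directly as a byte offset into a Short array.
 */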
//Tao need to remove, write another version of abs
//#include <math.h>
/* ======================================================================== */
/* Function : cal_dc_scalerENC */
/* Date : 01/25/2000 */
/* Purpose : calculation of DC quantization scale according to the
incoming Q and type; */
/* In/out : */
/* Int Qp Quantizer */
/* Return : */
/* DC Scaler */
/* Modified : */
/* ======================================================================== */
/* ======================================================================== */
Int cal_dc_scalerENC(Int QP, Int type)
{
Int dc_scaler;
if (type == 1)
{
if (QP > 0 && QP < 5)
dc_scaler = 8;
else if (QP > 4 && QP < 9)
dc_scaler = 2 * QP;
else if (QP > 8 && QP < 25)
dc_scaler = QP + 8;
else
dc_scaler = 2 * QP - 16;
}
else
{
if (QP > 0 && QP < 5)
dc_scaler = 8;
else if (QP > 4 && QP < 25)
dc_scaler = (QP + 13) / 2;
else
dc_scaler = QP - 6;
}
return dc_scaler;
}
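/* Example (editorial illustration, not part of the original source): for a
 * luminance block (type == 1) with QP = 15 this returns QP + 8 = 23; for a
 * chrominance block (type != 1) with the same QP it returns (QP + 13)/2 = 14.
 */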
/***********************************************************************
Function: BlockQuantDequantH263
Date: June 15, 1999
Purpose: Combine BlockQuantH263 and BlockDequantH263ENC
Input: coeff=> DCT coefficient
Output: qcoeff=> quantized coefficient
rcoeff=> reconstructed coefficient
return CBP for this block
4/2/01, correct dc_scaler for short_header mode.
5/14/01,
changed the division into LUT multiplication/shift and other
modifications to speed up fastQuant/DeQuant (check for zero 1st, rowq LUT,
fast bitmaprow mask and borrowed Addition method instead of ifs).
6/25/01,
Further optimization (~100K/QCIF), need more testing/comment before integration.
7/4/01, break up Inter / Intra function and merge for different cases.
7/22/01, combine AAN scaling here and reordering.
7/24/01, reorder already done in FDCT; the input here is in the next block and
is the transpose of the raster scan. Output is in the same order (for proof of concept).
8/1/01, change FDCT to do row/column FDCT without reordering; input is still
in the next block. The reconstructed DCT output is the current block in normal
order. The quantized output is in zigzag scan order for INTER, row/column for
INTRA. Use bitmapzz for zigzag RunLevel for INTER. The quantization is done
in column/row scanning order.
8/2/01, change IDCT to do column/row, change bitmaprow/col to the opposite.
8/3/01, add clipping to the reconstructed coefficient [-2047,2047].
9/4/05, removed scaling for AAN IDCT, use Chen IDCT instead.
********************************************************************/
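/* Editorial note (not part of the original source): the caller sets
 * Addition = QP - 1 + (QP & 1), so coeff_dequant() (fastquant_inline.h),
 * used below, reproduces the standard H.263 inverse quantization
 *   |REC| = 2*QP*|LEVEL| + QP       (QP odd)
 *   |REC| = 2*QP*|LEVEL| + QP - 1   (QP even)
 * for nonzero LEVEL, clipped to [-2048, 2047].
 */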
Int BlockQuantDequantH263Inter(Short *rcoeff, Short *qcoeff, struct QPstruct *QuantParam,
UChar bitmapcol[ ], UChar *bitmaprow, UInt *bitmapzz,
Int dctMode, Int comp, Int dummy, UChar shortHeader)
{
Int i, zz;
Int tmp, coeff, q_value;
Int QPdiv2 = QuantParam->QPdiv2;
Int QPx2 = QuantParam->QPx2;
Int Addition = QuantParam->Addition;
Int QPx2plus = QuantParam->QPx2plus;
Int round = 1 << 15;
Int q_scale = scaleArrayV[QuantParam->QP];
Int shift = 15 + (QPx2 >> 4);
Int *temp;
UChar *bcolptr = bitmapcol;
Int ac_clip; /* quantized coeff bound */
OSCL_UNUSED_ARG(comp);
OSCL_UNUSED_ARG(dummy);
if (shortHeader) ac_clip = 126; /* clip between [-127,126] (standard allows 127!) */
else ac_clip = 2047; /* clip between [-2048,2047] */
/* reset all bitmap to zero */
temp = (Int*) bitmapcol;
temp[0] = temp[1] = 0;
bitmapzz[0] = bitmapzz[1] = 0;
*bitmaprow = 0;
QPx2plus <<= 4;
QPx2plus -= 8;
rcoeff += 64; /* actual data is 64 item ahead */
//end = rcoeff + dctMode - 1;
//rcoeff--;
bcolptr--;
i = 0;
do
{
bcolptr++;
//rcoeff++;
//i=0;
coeff = rcoeff[i];
if (coeff == 0x7fff) /* all zero column */
{
i++;
continue;
}
do
{
if (coeff >= -QPx2plus && coeff < QPx2plus) /* quantize to zero */
{
i += 8;
if (i < (dctMode << 3))
{
coeff = rcoeff[i];
if (coeff > -QPx2plus && coeff < QPx2plus) /* quantize to zero */
{
i += 8;
coeff = rcoeff[i];
continue;
}
else
goto NONZERO1;
}
}
else
{
NONZERO1:
/* scaling */
q_value = AANScale[i]; /* load scale AAN */
zz = ZZTab[i]; /* zigzag order */
coeff = aan_scale(q_value, coeff, round, QPdiv2);
q_value = coeff_quant(coeff, q_scale, shift);
/* dequantization */
if (q_value)
{
//coeff = PV_MIN(ac_clip,PV_MAX(-ac_clip-1, q_value));
q_value = coeff_clip(q_value, ac_clip);
qcoeff[zz>>1] = q_value;
// dequant and clip
//coeff = PV_MIN(2047,PV_MAX(-2048, q_value));
tmp = 2047;
coeff = coeff_dequant(q_value, QPx2, Addition, tmp);
rcoeff[i-64] = coeff;
(*bcolptr) |= imask[i>>3];
if ((zz >> 1) > 31) bitmapzz[1] |= (1 << (63 - (zz >> 1)));
else bitmapzz[0] |= (1 << (31 - (zz >> 1)));
}
i += 8;
coeff = rcoeff[i];
}
}
while (i < (dctMode << 3));
i += (1 - (dctMode << 3));
}
while (i < dctMode) ;
i = dctMode;
tmp = 1 << (8 - i);
while (i--)
{
if (bitmapcol[i])(*bitmaprow) |= tmp;
tmp <<= 1;
}
if (*bitmaprow)
return 1;
else
return 0;
}
Int BlockQuantDequantH263Intra(Short *rcoeff, Short *qcoeff, struct QPstruct *QuantParam,
UChar bitmapcol[ ], UChar *bitmaprow, UInt *bitmapzz,
Int dctMode, Int comp, Int dc_scaler, UChar shortHeader)
{
Int i;
Int tmp, coeff, q_value;
Int QPx2 = QuantParam->QPx2;
Int Addition = QuantParam->Addition;
Int QPx2plus = QuantParam->QPx2plus;
Int round = 1 << 15;
Int q_scale = scaleArrayV[QuantParam->QP];
Int shift = 15 + (QPx2 >> 4);
UChar *bmcolptr = bitmapcol;
Int ac_clip; /* quantized coeff bound */
OSCL_UNUSED_ARG(bitmapzz);
OSCL_UNUSED_ARG(comp);
if (shortHeader) ac_clip = 126; /* clip between [-127,126] (standard allows 127!) */
else ac_clip = 2047; /* clip between [-2048,2047] */
*((Int*)bitmapcol) = *((Int*)(bitmapcol + 4)) = 0;
*bitmaprow = 0;
QPx2plus = QPx2 << 4;
QPx2plus -= 8;
rcoeff += 64; /* actual data is 64 element ahead */
i = 0;
/* DC value */
coeff = *rcoeff;
/* scaling */
if (coeff == 0x7fff && !shortHeader) /* all zero column */
{
bmcolptr++;
i++;
}
else
{
if (coeff == 0x7fff) /* shortHeader on */
{
coeff = 1; /* can't be zero */
qcoeff[0] = coeff;
coeff = coeff * dc_scaler;
coeff = PV_MAX(-2048, PV_MIN(2047, coeff));
rcoeff[-64] = coeff;
bitmapcol[0] |= 128;
bmcolptr++;
//qcoeff++;
//rcoeff++;
//i=0;
i++;
}
else
{
q_value = round + (coeff << 12);
coeff = q_value >> 16;
if (coeff >= 0) coeff += (dc_scaler >> 1) ;
else coeff -= (dc_scaler >> 1) ;
q_value = scaleArrayV2[dc_scaler];
coeff = coeff * q_value;
coeff >>= (15 + (dc_scaler >> 4));
coeff += ((UInt)coeff >> 31);
if (shortHeader)
coeff = PV_MAX(1, PV_MIN(254, coeff));
if (coeff)
{
qcoeff[0] = coeff;
coeff = coeff * dc_scaler;
coeff = PV_MAX(-2048, PV_MIN(2047, coeff));
rcoeff[-64] = coeff;
bitmapcol[0] |= 128;
}
i += 8;
}
}
/* AC values */
do
{
coeff = rcoeff[i];
if (coeff == 0x7fff) /* all zero row */
{
bmcolptr++;
i++;
continue;
}
do
{
if (coeff >= -QPx2plus && coeff < QPx2plus) /* quantize to zero */
{
i += 8;
if (i < dctMode << 3)
{
coeff = rcoeff[i];
if (coeff > -QPx2plus && coeff < QPx2plus) /* quantize to zero */
{
i += 8;
coeff = rcoeff[i];
continue;
}
else
goto NONZERO2;
}
}
else
{
NONZERO2: /* scaling */
q_value = AANScale[i]; /* 09/02/05 */
/* scale aan */
q_value = smlabb(q_value, coeff, round);
coeff = q_value >> 16;
/* quant */
q_value = smulbb(q_scale, coeff); /*mov q_value, coeff, lsl #14 */
/*smull tmp, coeff, q_value, q_scale*/
q_value >>= shift;
q_value += ((UInt)q_value >> 31); /* add 1 if negative */
if (q_value)
{
//coeff = PV_MIN(ac_clip,PV_MAX(-ac_clip-1, q_value));
q_value = coeff_clip(q_value, ac_clip);
qcoeff[i] = q_value;
// dequant and clip
//coeff = PV_MIN(2047,PV_MAX(-2048, q_value));
tmp = 2047;
coeff = coeff_dequant(q_value, QPx2, Addition, tmp);
rcoeff[i-64] = coeff;
(*bmcolptr) |= imask[i>>3];
}
i += 8;
coeff = rcoeff[i];
}
}
while (i < (dctMode << 3)) ;
//qcoeff++; /* next column */
bmcolptr++;
//rcoeff++;
i += (1 - (dctMode << 3)); //i = 0;
}
while (i < dctMode);//while(rcoeff < end) ;
i = dctMode;
tmp = 1 << (8 - i);
while (i--)
{
if (bitmapcol[i])(*bitmaprow) |= tmp;
tmp <<= 1;
}
if (((*bitmaprow)&127) || (bitmapcol[0]&127)) /* exclude DC */
return 1;
else
return 0;
}
/***********************************************************************
Function: BlockQuantDequantH263DC
Date: 5/3/2001
Purpose: H.263 quantization mode, only for DC component
6/25/01,
Further optimization (~100K/QCIF), need more testing/comment before integration.
********************************************************************/
Int BlockQuantDequantH263DCInter(Short *rcoeff, Short *qcoeff, struct QPstruct *QuantParam,
UChar *bitmaprow, UInt *bitmapzz, Int dummy, UChar shortHeader)
{
Int coeff, scale_q;
Int CBP = 0;
Int QP = QuantParam->QP;
Int QPx2plus = QuantParam->QPx2plus;
Int Addition = QuantParam->Addition;
Int shift = 15 + (QP >> 3);
Int ac_clip; /* quantized coeff bound */
Int tmp;
OSCL_UNUSED_ARG(dummy);
if (shortHeader) ac_clip = 126; /* clip between [-127,126] (standard allows 127!) */
else ac_clip = 2047; /* clip between [-2048,2047] */
*bitmaprow = 0;
bitmapzz[0] = bitmapzz[1] = 0;
coeff = rcoeff[0];
if (coeff >= -QPx2plus && coeff < QPx2plus)
{
rcoeff[0] = 0;
return CBP;//rcoeff[0] = 0; not needed since CBP will be zero
}
else
{
scale_q = scaleArrayV[QP];
coeff = aan_dc_scale(coeff, QP);
scale_q = coeff_quant(coeff, scale_q, shift);
//coeff = PV_MIN(ac_clip,PV_MAX(-ac_clip-1, tmp));
scale_q = coeff_clip(scale_q, ac_clip);
qcoeff[0] = scale_q;
QP <<= 1;
//coeff = PV_MIN(2047,PV_MAX(-2048, tmp));
tmp = 2047;
coeff = coeff_dequant(scale_q, QP, Addition, tmp);
rcoeff[0] = coeff;
(*bitmaprow) = 128;
bitmapzz[0] = (ULong)1 << 31;
CBP = 1;
}
return CBP;
}
Int BlockQuantDequantH263DCIntra(Short *rcoeff, Short *qcoeff, struct QPstruct *QuantParam,
UChar *bitmaprow, UInt *bitmapzz, Int dc_scaler, UChar shortHeader)
{
Int tmp, coeff;
OSCL_UNUSED_ARG(QuantParam);
*bitmaprow = 0;
coeff = rcoeff[0];
if (coeff >= 0) coeff += (dc_scaler >> 1) ;
else coeff -= (dc_scaler >> 1) ;
tmp = scaleArrayV2[dc_scaler];
tmp = coeff * tmp;
tmp >>= (15 + (dc_scaler >> 4));
tmp += ((UInt)tmp >> 31);
if (shortHeader)
tmp = PV_MAX(1, PV_MIN(254, tmp));
if (tmp)
{
qcoeff[0] = tmp;
coeff = tmp * dc_scaler;
coeff = PV_MAX(-2048, PV_MIN(2047, coeff));
rcoeff[0] = coeff;
*bitmaprow = 128;
bitmapzz[0] = (ULong)1 << 31;
}
return 0;
}
#ifndef NO_MPEG_QUANT
/***********************************************************************
Function: BlockQuantDequantMPEG
Date: June 15, 1999
Purpose: Combine BlockQuantMPEG and BlockDequantMPEGENC
Input: coeff=> DCT coefficient
Output: qcoeff=> quantized coefficient
rcoeff=> reconstructed coefficient
Modified: 7/5/01, break up function for Intra/Inter
8/3/01, update with changes from H263 quant mode.
8/3/01, add clipping to the reconstructed coefficient [-2048,2047]
8/6/01, optimize using multiplicative lookup-table.
can be further optimized using ARM assembly, e.g.,
clipping, 16-bit mult., etc !!!!!!!!!!!!!
********************************************************************/
Int BlockQuantDequantMPEGInter(Short *rcoeff, Short *qcoeff, Int QP, Int *qmat,
UChar bitmapcol[ ], UChar *bitmaprow, UInt *bitmapzz,
Int dctMode, Int comp, Int dc_scaler)
{
Int i, zz;
Int tmp, coeff, q_value = 0;
Int sum = 0;
Int stepsize, QPx2 = QP << 1;
Int CBP = 0;
Int round = 1 << 15;
Int q_scale = scaleArrayV[QP];
Int shift = 15 + (QP >> 3);
UChar *bcolptr = bitmapcol;
OSCL_UNUSED_ARG(dc_scaler);
OSCL_UNUSED_ARG(comp);
*((Int*)bitmapcol) = *((Int*)(bitmapcol + 4)) = 0;
bitmapzz[0] = bitmapzz[1] = 0;
*bitmaprow = 0;
rcoeff += 64;
i = 0;
bcolptr--;
do
{
bcolptr++;
coeff = rcoeff[i];
if (coeff == 0x7fff) /* all zero column */
{
i++;
continue;
}
do
{
q_value = AANScale[i]; /* 09/02/05 scaling for AAN*/
/* aan scaling */
q_value = smlabb(q_value, coeff, round);
coeff = q_value >> 16;
stepsize = qmat[i];
// if(coeff>0) coeff = (16*coeff + (stepsize/2)) / stepsize;
// else coeff = (16*coeff - (stepsize/2)) / stepsize;
coeff <<= 4;
if (coeff >= 0) coeff += (stepsize >> 1) ;
else coeff -= (stepsize >> 1) ;
q_value = scaleArrayV2[stepsize];
/* mpeg quant table scale */
coeff = smulbb(coeff, q_value);
coeff >>= (15 + (stepsize >> 4));
coeff += ((UInt)coeff >> 31);
/* QP scale */
if (coeff >= -QPx2 && coeff < QPx2) /* quantized to zero*/
{
i += 8;
}
else
{
// q_value = coeff/(QPx2);
q_value = coeff_quant(coeff, q_scale, shift);
if (q_value) /* dequant */
{
zz = ZZTab[i]; /* zigzag order */
tmp = 2047;
q_value = clip_2047(q_value, tmp);
qcoeff[zz>>1] = q_value;
//q_value=(((coeff*2)+SIGN0(coeff))*stepsize*QP)/16;
/* no need for SIGN0, no zero coming in this {} */
q_value = coeff_dequant_mpeg(q_value, stepsize, QP, tmp);
rcoeff[i-64] = q_value;
sum += q_value;
(*bcolptr) |= imask[i>>3];
if ((zz >> 1) > 31) bitmapzz[1] |= (1 << (63 - (zz >> 1)));
else bitmapzz[0] |= (1 << (31 - (zz >> 1)));
}
i += 8;
}
coeff = rcoeff[i];
}
while (i < (dctMode << 3)) ;
i += (1 - (dctMode << 3));
}
while (i < dctMode) ;
i = dctMode;
tmp = 1 << (8 - i);
while (i--)
{
if (bitmapcol[i])(*bitmaprow) |= tmp;
tmp <<= 1;
}
if (*bitmaprow)
CBP = 1; /* check CBP before mismatch control, 7/5/01 */
/* Mismatch control, 5/3/01 */
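/* (Editorial note, not part of the original source: MPEG-4 mismatch control
   requires the sum of the reconstructed coefficients of a block to be odd;
   when the sum is even the LSB of coefficient 63 is toggled, and the bitmaps
   are updated so the IDCT sees the extra nonzero coefficient.) */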
if (CBP)
{
if ((sum&0x1) == 0)
{
rcoeff--; /* rcoeff[63] */
coeff = *rcoeff;
coeff ^= 0x1;
*rcoeff = coeff;
if (coeff)
{
bitmapcol[7] |= 1;
(*bitmaprow) |= 1;
}
}
}
return CBP;
}
Int BlockQuantDequantMPEGIntra(Short *rcoeff, Short *qcoeff, Int QP, Int *qmat,
UChar bitmapcol[ ], UChar *bitmaprow, UInt *bitmapzz,
Int dctMode, Int comp, Int dc_scaler)
{
Int i;
Int tmp, coeff, q_value = 0;
Int sum = 0;
Int stepsize;
Int CBP = 0;
Int round = 1 << 15;
Int q_scale = scaleArrayV[QP];
Int shift = 15 + (QP >> 3);
Int round2 = (3 * QP + 2) >> 2;
Int QPx2plus = (QP << 1) - round2;
UChar *bmcolptr = bitmapcol;
OSCL_UNUSED_ARG(bitmapzz);
OSCL_UNUSED_ARG(comp);
*((Int*)bitmapcol) = *((Int*)(bitmapcol + 4)) = 0;
*bitmaprow = 0;
rcoeff += 64;
i = 0;
/* DC value */
coeff = *rcoeff;
if (coeff == 0x7fff) /* all zero column */
{
bmcolptr++;
i++;
}
else
{
q_value = round + (coeff << 12);
coeff = q_value >> 16;
/*if (coeff >= 0) coeff = (coeff + (dc_scaler/2)) / dc_scaler;
else coeff = (coeff - (dc_scaler/2)) / dc_scaler;*/
if (coeff >= 0) coeff += (dc_scaler >> 1) ;
else coeff -= (dc_scaler >> 1) ;
q_value = scaleArrayV2[dc_scaler];
/* mpeg quant table scale */
coeff = smulbb(coeff, q_value);
coeff >>= (15 + (dc_scaler >> 4));
coeff += ((UInt)coeff >> 31);
if (coeff)
{
coeff = PV_MAX(1, PV_MIN(254, coeff));
qcoeff[0] = coeff;
coeff = smulbb(coeff, dc_scaler);
q_value = clip_2047(coeff, 2047);
sum = q_value;
rcoeff[-64] = q_value;
bitmapcol[0] |= 128;
}
i += 8;
}
/* AC values */
do
{
coeff = rcoeff[i];
if (coeff == 0x7fff) /* all zero row */
{
bmcolptr++;
i++;
continue;
}
do
{
/* scaling */
q_value = AANScale[i]; /* 09/02/05 */
/* q_value = coeff*q_value + round */
q_value = smlabb(coeff, q_value, round);
coeff = q_value >> 16;
stepsize = qmat[i];
/*if(coeff>0) coeff = (16*coeff + (stepsize/2)) / stepsize;
else coeff = (16*coeff - (stepsize/2)) / stepsize;*/
coeff <<= 4;
if (coeff >= 0) coeff += (stepsize >> 1) ;
else coeff -= (stepsize >> 1) ;
q_value = scaleArrayV2[stepsize];
/* scale mpeg quant */
coeff = smulbb(coeff, q_value);
coeff >>= (15 + (stepsize >> 4));
coeff += ((UInt)coeff >> 31);
if (coeff >= -QPx2plus && coeff < QPx2plus)
{
i += 8;
}
else
{
//q_value = ( coeff + SIGN0(coeff)*((3*QP+2)/4))/(2*QP);
if (coeff > 0) coeff += round2;
else if (coeff < 0) coeff -= round2;
q_value = smulbb(coeff, q_scale);
q_value >>= shift;
q_value += ((UInt)q_value >> 31);
if (q_value)
{
tmp = 2047;
q_value = clip_2047(q_value, tmp);
qcoeff[i] = q_value;
stepsize = smulbb(stepsize, QP);
q_value = smulbb(q_value, stepsize);
q_value = coeff_dequant_mpeg_intra(q_value, tmp);
//q_value = (coeff*stepsize*QP*2)/16;
rcoeff[i-64] = q_value;
sum += q_value;
(*bmcolptr) |= imask[i>>3];
}
i += 8;
}
coeff = rcoeff[i];
}
while (i < (dctMode << 3)) ;
bmcolptr++;
i += (1 - (dctMode << 3));
}
while (i < dctMode) ;
i = dctMode;
tmp = 1 << (8 - i);
while (i--)
{
if (bitmapcol[i])(*bitmaprow) |= tmp;
tmp <<= 1;
}
if (((*bitmaprow) &127) || (bitmapcol[0]&127))
CBP = 1; /* check CBP before mismatch control, 7/5/01 */
/* Mismatch control, 5/3/01 */
if (CBP || bitmapcol[0])
{
if ((sum&0x1) == 0)
{
rcoeff--; /* rcoeff[63] */
coeff = *rcoeff;
coeff ^= 0x1;
*rcoeff = coeff;
if (coeff)
{
bitmapcol[7] |= 1;
(*bitmaprow) |= 1;
}
}
}
return CBP;
}
/***********************************************************************
Function: BlockQuantDequantMPEGDC
Date: 5/3/2001
Purpose: MPEG Quant/Dequant for DC only block.
********************************************************************/
Int BlockQuantDequantMPEGDCInter(Short *rcoeff, Short *qcoeff, Int QP, Int *qmat,
UChar bitmapcol[ ], UChar *bitmaprow, UInt *bitmapzz, Int dummy)
{
Int q_value, coeff, stepsize;
Int CBP = 0;
Int q_scale = scaleArrayV[QP];
Int shift = 15 + (QP >> 3);
Int QPx2 = QP << 1;
OSCL_UNUSED_ARG(dummy);
*((Int*)bitmapcol) = *((Int*)(bitmapcol + 4)) = 0;
*bitmaprow = 0;
bitmapzz[0] = bitmapzz[1] = 0;
coeff = rcoeff[0];
stepsize = qmat[0];
/*if(coeff>0) coeff = (16*coeff + (stepsize/2)) / stepsize;
else coeff = (16*coeff - (stepsize/2)) / stepsize;*/
coeff <<= 4;
if (coeff >= 0) coeff += (stepsize >> 1) ;
else coeff -= (stepsize >> 1) ;
q_value = scaleArrayV2[stepsize];
coeff = smulbb(coeff, q_value);
coeff >>= (15 + (stepsize >> 4));
coeff += ((UInt)coeff >> 31);
if (coeff >= -QPx2 && coeff < QPx2)
{
rcoeff[0] = 0;
return CBP;
}
else
{
// q_value = coeff/(QPx2);
q_value = coeff_quant(coeff, q_scale, shift);
if (q_value)
{
//PV_MIN(2047,PV_MAX(-2048, q_value));
q_value = clip_2047(q_value, 2047);
qcoeff[0] = q_value;
q_value = coeff_dequant_mpeg(q_value, stepsize, QP, 2047);
//q_value=(((coeff*2)+SIGN0(coeff))*stepsize*QP)/16;
rcoeff[0] = q_value;
bitmapcol[0] = 128;
(*bitmaprow) = 128;
bitmapzz[0] = (UInt)1 << 31;
CBP = 1;
/* Mismatch control, 5/3/01 */
if ((q_value&0x1) == 0)
{
rcoeff[63] = 1; /* after scaling it remains the same */
bitmapcol[7] |= 1;
(*bitmaprow) |= 1;
}
}
}
return CBP;
}
Int BlockQuantDequantMPEGDCIntra(Short *rcoeff, Short *qcoeff, Int QP, Int *qmat,
UChar bitmapcol[ ], UChar *bitmaprow, UInt *bitmapzz,
Int dc_scaler)
{
Int tmp, coeff, q_value;
OSCL_UNUSED_ARG(QP);
OSCL_UNUSED_ARG(qmat);
*((Int*)bitmapcol) = *((Int*)(bitmapcol + 4)) = 0;
*bitmaprow = 0;
coeff = rcoeff[0];
/*if (coeff >= 0) tmp = (coeff + dc_scaler/2) / dc_scaler;
else tmp = (coeff - dc_scaler/2) / dc_scaler;*/
if (coeff >= 0) coeff += (dc_scaler >> 1) ;
else coeff -= (dc_scaler >> 1) ;
tmp = scaleArrayV2[dc_scaler];
tmp = smulbb(tmp, coeff);
tmp >>= (15 + (dc_scaler >> 4));
tmp += ((UInt)tmp >> 31);
if (tmp)
{
coeff = PV_MAX(1, PV_MIN(254, tmp));
qcoeff[0] = coeff;
q_value = smulbb(coeff, dc_scaler);
q_value = clip_2047(q_value, 2047);
rcoeff[0] = q_value;
bitmapcol[0] = 128;
*bitmaprow = 128;
bitmapzz[0] = (UInt)1 << 31;
/* Mismatch control, 5/3/01 */
if ((q_value&0x1) == 0)
{
rcoeff[63] = 1; /* after scaling it remains the same */
bitmapcol[7] |= 1;
(*bitmaprow) |= 1;
}
}
return 0;
}
#endif


@@ -0,0 +1,625 @@
/* ------------------------------------------------------------------
* Copyright (C) 1998-2009 PacketVideo
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied.
* See the License for the specific language governing permissions
* and limitations under the License.
* -------------------------------------------------------------------
*/
/*********************************************************************************/
/* Filename: fastquant_inline.h */
/* Description: Implementation for in-line functions used in fastquant.cpp */
/* Modified: */
/*********************************************************************************/
#ifndef _FASTQUANT_INLINE_H_
#define _FASTQUANT_INLINE_H_
#include "mp4def.h"
#if !defined(PV_ARM_GCC_V5) && !defined(PV_ARM_GCC_V4) /* ARM GNU COMPILER */
__inline int32 aan_scale(int32 q_value, int32 coeff, int32 round, int32 QPdiv2)
{
q_value = coeff * q_value + round;
coeff = q_value >> 16;
if (coeff < 0) coeff += QPdiv2;
else coeff -= QPdiv2;
return coeff;
}
__inline int32 coeff_quant(int32 coeff, int32 q_scale, int32 shift)
{
int32 q_value;
q_value = coeff * q_scale; //q_value = -((-(coeff + QPdiv2)*q_scale)>>LSL);
q_value >>= shift; //q_value = (((coeff - QPdiv2)*q_scale)>>LSL );
q_value += ((UInt)q_value >> 31); /* add one if negative */
return q_value;
}
__inline int32 coeff_clip(int32 q_value, int32 ac_clip)
{
int32 coeff = q_value + ac_clip;
if ((UInt)coeff > (UInt)(ac_clip << 1))
q_value = ac_clip ^(q_value >> 31);
return q_value;
}
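/* Editorial note (not part of the original source): the unsigned compare above
 * flags any q_value outside [-ac_clip-1, ac_clip]; in that case
 * ac_clip ^ (q_value >> 31) yields ac_clip for positive overflow and
 * -ac_clip-1 for negative overflow, matching the [-127,126] and [-2048,2047]
 * bounds quoted by the callers.
 */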
__inline int32 coeff_dequant(int32 q_value, int32 QPx2, int32 Addition, int32 tmp)
{
int32 coeff;
OSCL_UNUSED_ARG(tmp);
if (q_value < 0)
{
coeff = q_value * QPx2 - Addition;
if (coeff < -2048)
coeff = -2048;
}
else
{
coeff = q_value * QPx2 + Addition;
if (coeff > 2047)
coeff = 2047;
}
return coeff;
}
__inline int32 smlabb(int32 q_value, int32 coeff, int32 round)
{
q_value = coeff * q_value + round;
return q_value;
}
__inline int32 smulbb(int32 q_scale, int32 coeff)
{
int32 q_value;
q_value = coeff * q_scale;
return q_value;
}
__inline int32 aan_dc_scale(int32 coeff, int32 QP)
{
if (coeff < 0) coeff += (QP >> 1);
else coeff -= (QP >> 1);
return coeff;
}
__inline int32 clip_2047(int32 q_value, int32 tmp)
{
OSCL_UNUSED_ARG(tmp);
if (q_value < -2048)
{
q_value = -2048;
}
else if (q_value > 2047)
{
q_value = 2047;
}
return q_value;
}
__inline int32 coeff_dequant_mpeg(int32 q_value, int32 stepsize, int32 QP, int32 tmp)
{
int32 coeff;
OSCL_UNUSED_ARG(tmp);
coeff = q_value << 1;
stepsize *= QP;
if (coeff > 0)
{
q_value = (coeff + 1) * stepsize;
q_value >>= 4;
if (q_value > 2047) q_value = 2047;
}
else
{
q_value = (coeff - 1) * stepsize;
q_value += 15;
q_value >>= 4;
if (q_value < -2048) q_value = -2048;
}
return q_value;
}
__inline int32 coeff_dequant_mpeg_intra(int32 q_value, int32 tmp)
{
OSCL_UNUSED_ARG(tmp);
q_value <<= 1;
if (q_value > 0)
{
q_value >>= 4;
if (q_value > 2047) q_value = 2047;
}
else
{
q_value += 15;
q_value >>= 4;
if (q_value < -2048) q_value = -2048;
}
return q_value;
}
#elif defined(__CC_ARM) /* only work with arm v5 */
#if defined(__TARGET_ARCH_5TE)
__inline int32 aan_scale(int32 q_value, int32 coeff,
int32 round, int32 QPdiv2)
{
__asm
{
smlabb q_value, coeff, q_value, round
movs coeff, q_value, asr #16
addle coeff, coeff, QPdiv2
subgt coeff, coeff, QPdiv2
}
return coeff;
}
__inline int32 coeff_quant(int32 coeff, int32 q_scale, int32 shift)
{
int32 q_value;
__asm
{
smulbb q_value, q_scale, coeff /*mov coeff, coeff, lsl #14*/
mov coeff, q_value, asr shift /*smull tmp, coeff, q_scale, coeff*/
add q_value, coeff, coeff, lsr #31
}
return q_value;
}
__inline int32 coeff_dequant(int32 q_value, int32 QPx2, int32 Addition, int32 tmp)
{
int32 coeff;
__asm
{
cmp q_value, #0
smulbb coeff, q_value, QPx2
sublt coeff, coeff, Addition
addge coeff, coeff, Addition
add q_value, coeff, tmp
subs q_value, q_value, #3840
subcss q_value, q_value, #254
eorhi coeff, tmp, coeff, asr #31
}
return coeff;
}
__inline int32 smlabb(int32 q_value, int32 coeff, int32 round)
{
__asm
{
smlabb q_value, coeff, q_value, round
}
return q_value;
}
__inline int32 smulbb(int32 q_scale, int32 coeff)
{
int32 q_value;
__asm
{
smulbb q_value, q_scale, coeff
}
return q_value;
}
__inline int32 coeff_dequant_mpeg(int32 q_value, int32 stepsize, int32 QP, int32 tmp)
{
/* tmp must have value of 2047 */
int32 coeff;
__asm
{
movs coeff, q_value, lsl #1
smulbb stepsize, stepsize, QP
addgt coeff, coeff, #1
sublt coeff, coeff, #1
smulbb q_value, coeff, stepsize
addlt q_value, q_value, #15
mov q_value, q_value, asr #4
add coeff, q_value, tmp
subs coeff, coeff, #0xf00
subcss coeff, coeff, #0xfe
eorhi q_value, tmp, q_value, asr #31
}
return q_value;
}
#else // not ARMV5TE
__inline int32 aan_scale(int32 q_value, int32 coeff,
int32 round, int32 QPdiv2)
{
__asm
{
mla q_value, coeff, q_value, round
movs coeff, q_value, asr #16
addle coeff, coeff, QPdiv2
subgt coeff, coeff, QPdiv2
}
return coeff;
}
__inline int32 coeff_quant(int32 coeff, int32 q_scale, int32 shift)
{
int32 q_value;
__asm
{
mul q_value, q_scale, coeff /*mov coeff, coeff, lsl #14*/
mov coeff, q_value, asr shift /*smull tmp, coeff, q_scale, coeff*/
add q_value, coeff, coeff, lsr #31
}
return q_value;
}
__inline int32 coeff_dequant(int32 q_value, int32 QPx2, int32 Addition, int32 tmp)
{
int32 coeff;
__asm
{
cmp q_value, #0
mul coeff, q_value, QPx2
sublt coeff, coeff, Addition
addge coeff, coeff, Addition
add q_value, coeff, tmp
subs q_value, q_value, #3840
subcss q_value, q_value, #254
eorhi coeff, tmp, coeff, asr #31
}
return coeff;
}
__inline int32 smlabb(int32 q_value, int32 coeff, int32 round)
{
__asm
{
mla q_value, coeff, q_value, round
}
return q_value;
}
__inline int32 smulbb(int32 q_scale, int32 coeff)
{
int32 q_value;
__asm
{
mul q_value, q_scale, coeff
}
return q_value;
}
__inline int32 coeff_dequant_mpeg(int32 q_value, int32 stepsize, int32 QP, int32 tmp)
{
/* tmp must have value of 2047 */
int32 coeff;
__asm
{
movs coeff, q_value, lsl #1
mul stepsize, stepsize, QP
addgt coeff, coeff, #1
sublt coeff, coeff, #1
mul q_value, coeff, stepsize
addlt q_value, q_value, #15
mov q_value, q_value, asr #4
add coeff, q_value, tmp
subs coeff, coeff, #0xf00
subcss coeff, coeff, #0xfe
eorhi q_value, tmp, q_value, asr #31
}
return q_value;
}
#endif
__inline int32 coeff_clip(int32 q_value, int32 ac_clip)
{
int32 coeff;
__asm
{
add coeff, q_value, ac_clip
subs coeff, coeff, ac_clip, lsl #1
eorhi q_value, ac_clip, q_value, asr #31
}
return q_value;
}
__inline int32 aan_dc_scale(int32 coeff, int32 QP)
{
__asm
{
cmp coeff, #0
addle coeff, coeff, QP, asr #1
subgt coeff, coeff, QP, asr #1
}
return coeff;
}
__inline int32 clip_2047(int32 q_value, int32 tmp)
{
/* tmp must have value of 2047 */
int32 coeff;
__asm
{
add coeff, q_value, tmp
subs coeff, coeff, #0xf00
subcss coeff, coeff, #0xfe
eorhi q_value, tmp, q_value, asr #31
}
return q_value;
}
__inline int32 coeff_dequant_mpeg_intra(int32 q_value, int32 tmp)
{
int32 coeff;
__asm
{
movs q_value, q_value, lsl #1
addlt q_value, q_value, #15
mov q_value, q_value, asr #4
add coeff, q_value, tmp
subs coeff, coeff, #0xf00
subcss coeff, coeff, #0xfe
eorhi q_value, tmp, q_value, asr #31
}
return q_value;
}
#elif ( defined(PV_ARM_GCC_V4) || defined(PV_ARM_GCC_V5) ) /* ARM GNU COMPILER */
__inline int32 aan_scale(int32 q_value, int32 coeff,
int32 round, int32 QPdiv2)
{
register int32 out;
register int32 qv = q_value;
register int32 cf = coeff;
register int32 rr = round;
register int32 qp = QPdiv2;
asm volatile("smlabb %0, %2, %1, %3\n\t"
"movs %0, %0, asr #16\n\t"
"addle %0, %0, %4\n\t"
"subgt %0, %0, %4"
: "=&r"(out)
: "r"(qv),
"r"(cf),
"r"(rr),
"r"(qp));
return out;
}
__inline int32 coeff_quant(int32 coeff, int32 q_scale, int32 shift)
{
register int32 out;
register int32 temp1;
register int32 cc = coeff;
register int32 qs = q_scale;
register int32 ss = shift;
asm volatile("smulbb %0, %3, %2\n\t"
"mov %1, %0, asr %4\n\t"
"add %0, %1, %1, lsr #31"
: "=&r"(out),
"=&r"(temp1)
: "r"(cc),
"r"(qs),
"r"(ss));
return out;
}
__inline int32 coeff_clip(int32 q_value, int32 ac_clip)
{
register int32 coeff;
asm volatile("add %1, %0, %2\n\t"
"subs %1, %1, %2, lsl #1\n\t"
"eorhi %0, %2, %0, asr #31"
: "+r"(q_value),
"=&r"(coeff)
: "r"(ac_clip));
return q_value;
}
__inline int32 coeff_dequant(int32 q_value, int32 QPx2, int32 Addition, int32 tmp)
{
register int32 out;
register int32 temp1;
register int32 qv = q_value;
register int32 qp = QPx2;
register int32 aa = Addition;
register int32 tt = tmp;
asm volatile("cmp %2, #0\n\t"
"mul %0, %2, %3\n\t"
"sublt %0, %0, %4\n\t"
"addge %0, %0, %4\n\t"
"add %1, %0, %5\n\t"
"subs %1, %1, #3840\n\t"
"subcss %1, %1, #254\n\t"
"eorhi %0, %5, %0, asr #31"
: "=&r"(out),
"=&r"(temp1)
: "r"(qv),
"r"(qp),
"r"(aa),
"r"(tt));
return out;
}
__inline int32 smlabb(int32 q_value, int32 coeff, int32 round)
{
register int32 out;
register int32 aa = (int32)q_value;
register int32 bb = (int32)coeff;
register int32 cc = (int32)round;
asm volatile("smlabb %0, %1, %2, %3"
: "=&r"(out)
: "r"(aa),
"r"(bb),
"r"(cc));
return out;
}
__inline int32 smulbb(int32 q_scale, int32 coeff)
{
register int32 out;
register int32 aa = (int32)q_scale;
register int32 bb = (int32)coeff;
asm volatile("smulbb %0, %1, %2"
: "=&r"(out)
: "r"(aa),
"r"(bb));
return out;
}
__inline int32 aan_dc_scale(int32 coeff, int32 QP)
{
register int32 out;
register int32 cc = coeff;
register int32 qp = QP;
asm volatile("cmp %1, #0\n\t"
"addle %0, %1, %2, asr #1\n\t"
"subgt %0, %1, %2, asr #1"
: "=&r"(out)
: "r"(cc),
"r"(qp));
return out;
}
__inline int32 clip_2047(int32 q_value, int32 tmp)
{
register int32 coeff;
asm volatile("add %1, %0, %2\n\t"
"subs %1, %1, #0xF00\n\t"
"subcss %1, %1, #0xFE\n\t"
"eorhi %0, %2, %0, asr #31"
: "+r"(q_value),
"=&r"(coeff)
: "r"(tmp));
return q_value;
}
__inline int32 coeff_dequant_mpeg(int32 q_value, int32 stepsize, int32 QP, int32 tmp)
{
register int32 out;
register int32 temp1;
register int32 qv = q_value;
register int32 ss = stepsize;
register int32 qp = QP;
register int32 tt = tmp;
asm volatile("movs %1, %2, lsl #1\n\t"
"mul %0, %3, %4\n\t"
"addgt %1, %1, #1\n\t"
"sublt %1, %1, #1\n\t"
"mul %0, %1, %0\n\t"
"addlt %0, %0, #15\n\t"
"mov %0, %0, asr #4\n\t"
"add %1, %0, %5\n\t"
"subs %1, %1, #0xF00\n\t"
"subcss %1, %1, #0xFE\n\t"
"eorhi %0, %5, %0, asr #31"
: "=&r"(out),
"=&r"(temp1)
: "r"(qv),
"r"(ss),
"r"(qp),
"r"(tt));
return out;
}
__inline int32 coeff_dequant_mpeg_intra(int32 q_value, int32 tmp)
{
register int32 out;
register int32 temp1;
register int32 qv = q_value;
register int32 tt = tmp;
asm volatile("movs %1, %2, lsl #1\n\t"
"addlt %1, %1, #15\n\t"
"mov %0, %1, asr #4\n\t"
"add %1, %0, %3\n\t"
"subs %1, %1, #0xF00\n\t"
"subcss %1, %1, #0xFE\n\t"
"eorhi %0, %3, %0, asr #31"
: "=&r"(out),
"=&r"(temp1)
: "r"(qv),
"r"(tt));
return out;
}
#endif // Platform
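/*
 * Illustrative note (not part of the original PacketVideo source):
 * coeff_clip() and clip_2047() above rely on a branch-free saturation
 * trick.  For a positive limit L, (q + L) exceeds 2*L as an unsigned
 * value exactly when q lies outside [-L, L]; in that case L ^ (q >> 31)
 * yields +L for positive overflow and ~L = -(L+1) for negative overflow,
 * so values are clamped to the asymmetric range [-(L+1), L]
 * (2047 / -2048 on the coefficient path).  A plain reference version of
 * the same saturation, kept only for documentation purposes:
 */
__inline int32 clip_range_reference(int32 q, int32 limit)
{
    if (q > limit) return limit;               /* e.g.  2047 */
    if (q < -(limit + 1)) return -(limit + 1); /* e.g. -2048 */
    return q;
}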
#endif //_FASTQUANT_INLINE_H_

View File

@ -0,0 +1,287 @@
/* ------------------------------------------------------------------
* Copyright (C) 1998-2009 PacketVideo
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied.
* See the License for the specific language governing permissions
* and limitations under the License.
* -------------------------------------------------------------------
*/
#include "mp4def.h"
#include "mp4enc_lib.h"
#include "mp4lib_int.h"
#include "m4venc_oscl.h"
/* 3/29/01 fast half-pel search based on neighboring guess */
/* value ranging from 0 to 4, high complexity (more accurate) to
low complexity (less accurate) */
#define HP_DISTANCE_TH 2 /* half-pel distance threshold */
#define PREF_16_VEC 129 /* 1MV bias versus 4MVs*/
#ifdef __cplusplus
extern "C"
{
#endif
void GenerateSearchRegion(UChar *searchPadding, UChar *ref, Int width, Int height,
Int ilow, Int ihigh, Int jlow, Int jhigh);
void InterpDiag(UChar *prev, Int lx, UChar *pred_block);
void InterpHorz(UChar *prev, Int lx, UChar *pred_block);
void InterpVert(UChar *prev, Int lx, UChar *pred_block);
#ifdef __cplusplus
}
#endif
const static Int distance_tab[9][9] = /* [hp_guess][k] */
{
{0, 1, 1, 1, 1, 1, 1, 1, 1},
{1, 0, 1, 2, 3, 4, 3, 2, 1},
{1, 0, 0, 0, 1, 2, 3, 2, 1},
{1, 2, 1, 0, 1, 2, 3, 4, 3},
{1, 2, 1, 0, 0, 0, 1, 2, 3},
{1, 4, 3, 2, 1, 0, 1, 2, 3},
{1, 2, 3, 2, 1, 0, 0, 0, 1},
{1, 2, 3, 4, 3, 2, 1, 0, 1},
{1, 0, 1, 2, 3, 2, 1, 0, 0}
};
/*=====================================================================
Function: FindHalfPelMB
Date: 10/7/2000
Purpose: Find half pel resolution MV surrounding the full-pel MV
=====================================================================*/
void FindHalfPelMB(VideoEncData *video, UChar *cur, MOT *mot, UChar *ncand,
Int xpos, Int ypos, Int *xhmin, Int *yhmin, Int hp_guess)
{
// hp_mem = ULong *vertArray; /* 20x17 */
// ULong *horzArray; /* 20x16 */
// ULong *diagArray; /* 20x17 */
Int dmin, d;
Int xh, yh;
Int k, kmin = 0;
Int imin, jmin, ilow, jlow;
Int h263_mode = video->encParams->H263_Enabled; /* 3/29/01 */
Int in_range[9] = {0, 1, 1, 1, 1, 1, 1, 1, 1}; /* 3/29/01 */
Int range = video->encParams->SearchRange;
Int lx = video->currVop->pitch;
Int width = video->currVop->width; /* padding */
Int height = video->vol[video->currLayer]->height;
Int(**SAD_MB_HalfPel)(UChar*, UChar*, Int, void*) =
video->functionPointer->SAD_MB_HalfPel;
void *extra_info = video->sad_extra_info;
Int next_hp_pos[9][2] = {{0, 0}, {2, 0}, {1, 1}, {0, 2}, { -1, 1}, { -2, 0}, { -1, -1}, {0, -2}, {0, -1}};
Int next_ncand[9] = {0, 1 , lx, lx, 0, -1, -1, -lx, -lx};
cur = video->currYMB;
/**************** check range ***************************/
/* 3/29/01 */
imin = xpos + (mot[0].x >> 1);
jmin = ypos + (mot[0].y >> 1);
ilow = xpos - range;
jlow = ypos - range;
if (!h263_mode)
{
if (imin <= -15 || imin == ilow)
in_range[1] = in_range[7] = in_range[8] = 0;
else if (imin >= width - 1)
in_range[3] = in_range[4] = in_range[5] = 0;
if (jmin <= -15 || jmin == jlow)
in_range[1] = in_range[2] = in_range[3] = 0;
else if (jmin >= height - 1)
in_range[5] = in_range[6] = in_range[7] = 0;
}
else
{
if (imin <= 0 || imin == ilow)
in_range[1] = in_range[7] = in_range[8] = 0;
else if (imin >= width - 16)
in_range[3] = in_range[4] = in_range[5] = 0;
if (jmin <= 0 || jmin == jlow)
in_range[1] = in_range[2] = in_range[3] = 0;
else if (jmin >= height - 16)
in_range[5] = in_range[6] = in_range[7] = 0;
}
xhmin[0] = 0;
yhmin[0] = 0;
dmin = mot[0].sad;
xh = 0;
yh = -1;
ncand -= lx; /* initial position */
for (k = 2; k <= 8; k += 2)
{
if (distance_tab[hp_guess][k] < HP_DISTANCE_TH)
{
if (in_range[k])
{
d = (*(SAD_MB_HalfPel[((yh&1)<<1)+(xh&1)]))(ncand, cur, (dmin << 16) | lx, extra_info);
if (d < dmin)
{
dmin = d;
xhmin[0] = xh;
yhmin[0] = yh;
kmin = k;
}
else if (d == dmin &&
PV_ABS(mot[0].x + xh) + PV_ABS(mot[0].y + yh) < PV_ABS(mot[0].x + xhmin[0]) + PV_ABS(mot[0].y + yhmin[0]))
{
xhmin[0] = xh;
yhmin[0] = yh;
kmin = k;
}
}
}
xh += next_hp_pos[k][0];
yh += next_hp_pos[k][1];
ncand += next_ncand[k];
if (k == 8)
{
if (xhmin[0] != 0 || yhmin[0] != 0)
{
k = -1;
hp_guess = kmin;
}
}
}
mot[0].sad = dmin;
mot[0].x += xhmin[0];
mot[0].y += yhmin[0];
return ;
}
#ifndef NO_INTER4V
/*=====================================================================
Function: FindHalfPelBlk
Date: 10/7/2000
Purpose: Find half pel resolution MV surrounding the full-pel MV
And decide between 1MV or 4MV mode
=====================================================================*/
///// THIS FUNCTION IS NOT WORKING!!! NEEDS TO BE REVISITED
Int FindHalfPelBlk(VideoEncData *video, UChar *cur, MOT *mot, Int sad16, UChar *ncand8[],
UChar *mode, Int xpos, Int ypos, Int *xhmin, Int *yhmin, UChar *hp_mem)
{
Int k, comp;
Int xh, yh;//, xhmin, yhmin;
Int imin, jmin, ilow, jlow;
Int height;
UChar *cand, *cur8;
UChar *hmem;//[17*17]; /* half-pel memory */
Int d, dmin, sad8;
Int lx = video->currVop->pitch;
Int width = video->currVop->width; /* , padding */
Int(*SAD_Blk_HalfPel)(UChar*, UChar*, Int, Int, Int, Int, Int, void*) = video->functionPointer->SAD_Blk_HalfPel;
void *extra_info = video->sad_extra_info;
Int in_range[8]; /* 3/29/01 */
Int range = video->encParams->SearchRange;
Int swidth;
Int next_hp_pos[8][2] = {{1, 0}, {1, 0}, {0, 1}, {0, 1}, { -1, 0}, { -1, 0}, {0, -1}, {0, -1}};
height = video->vol[video->currLayer]->height;
hmem = hp_mem;
sad8 = 0;
for (comp = 0; comp < 4; comp++)
{
#ifdef _SAD_STAT
num_HP_Blk++;
#endif
/**************** check range ***************************/
/* 3/29/01 */
M4VENC_MEMSET(in_range, 1, sizeof(Int) << 3);
imin = xpos + ((comp & 1) << 3) + (mot[comp+1].x >> 1);
jmin = ypos + ((comp & 2) << 2) + (mot[comp+1].y >> 1);
ilow = xpos + ((comp & 1) << 3) - range;
jlow = ypos + ((comp & 2) << 2) - range;
if (imin <= -15 || imin == ilow)
in_range[0] = in_range[6] = in_range[7] = 0;
else if (imin >= width - 1)
in_range[2] = in_range[3] = in_range[4] = 0;
if (jmin <= -15 || jmin == jlow)
in_range[0] = in_range[1] = in_range[2] = 0;
else if (jmin >= height - 1)
in_range[4] = in_range[5] = in_range[6] = 0;
/**************** half-pel search ***********************/
cur8 = cur + ((comp & 1) << 3) + ((comp & 2) << 2) * width ;
/* generate half-pel search region */
{
cand = ncand8[comp+1];
swidth = lx;
}
xhmin[comp+1] = 0;
yhmin[comp+1] = 0;
dmin = mot[comp+1].sad;
xh = -1;
yh = -1;
for (k = 0; k < 8; k++)
{
if (in_range[k])
{
d = (*SAD_Blk_HalfPel)(cand, cur8, dmin, lx, swidth, xh, yh, extra_info);
if (d < dmin)
{
dmin = d;
xhmin[comp+1] = xh;
yhmin[comp+1] = yh;
}
}
xh += next_hp_pos[k][0];
yh += next_hp_pos[k][1];
}
/********************************************/
mot[comp+1].x += xhmin[comp+1];
mot[comp+1].y += yhmin[comp+1];
mot[comp+1].sad = dmin;
sad8 += dmin;
if (sad8 >= sad16 - PREF_16_VEC)
{
*mode = MODE_INTER;
for (k = 1; k <= 4; k++)
{
mot[k].sad = (mot[0].sad + 2) >> 2;
mot[k].x = mot[0].x;
mot[k].y = mot[0].y;
}
return sad8;
}
hmem += (10 * 10);
}
*mode = MODE_INTER4V;
return sad8;
}
#endif /* NO_INTER4V */

View File

@ -0,0 +1,43 @@
/* ------------------------------------------------------------------
* Copyright (C) 1998-2009 PacketVideo
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied.
* See the License for the specific language governing permissions
* and limitations under the License.
* -------------------------------------------------------------------
*/
/*********************************************************************************/
/* Revision History */
/* Date: 11/04/05 */
/* Description: Created to abstract out OSCL so that the code can be used */
/* with both the V3 and V4 OSCL libraries. This file is for V4. */
/*********************************************************************************/
#ifndef _M4VENC_OSCL_H_
#define _M4VENC_OSCL_H_
#include <stdlib.h>
#include <math.h>
#define M4VENC_MALLOC(size) malloc(size)
#define M4VENC_FREE(ptr) free(ptr)
#define M4VENC_MEMSET(ptr,val,size) memset(ptr,val,size)
#define M4VENC_MEMCPY(dst,src,size) memcpy(dst,src,size)
#define M4VENC_LOG(x) log(x)
#define M4VENC_SQRT(x) sqrt(x)
#define M4VENC_POW(x,y) pow(x,y)
#define M4VENC_HAS_SYMBIAN_SUPPORT OSCL_HAS_SYMBIAN_SUPPORT
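/*
 * Illustrative usage (not part of the original source): encoder code is
 * expected to call these wrappers instead of libc directly, so that an
 * OSCL-based build can remap them without touching the callers, e.g.
 *
 *     UChar *buf = (UChar*) M4VENC_MALLOC(size);
 *     if (buf != NULL) M4VENC_MEMSET(buf, 0, size);
 *     ...
 *     M4VENC_FREE(buf);
 */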
#endif //_M4VENC_OSCL_H_

View File

@ -0,0 +1,386 @@
/* ------------------------------------------------------------------
* Copyright (C) 1998-2009 PacketVideo
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied.
* See the License for the specific language governing permissions
* and limitations under the License.
* -------------------------------------------------------------------
*/
#include "mp4def.h"
#include "mp4enc_lib.h"
#include "mp4lib_int.h"
#include "m4venc_oscl.h"
#define VOP_OFFSET ((lx<<4)+16) /* for offset to image area */
#define CVOP_OFFSET ((lx<<2)+8)
#define PREF_INTRA 512 /* bias for INTRA coding */
/*===============================================================
Function: ChooseMode
Date: 09/21/2000
Purpose: Choosing between INTRA or INTER
Input/Output: Pointer to the starting point of the macroblock.
Note:
===============================================================*/
void ChooseMode_C(UChar *Mode, UChar *cur, Int lx, Int min_SAD)
{
Int i, j;
Int MB_mean, A, tmp, Th;
Int offset = (lx >> 2) - 4;
UChar *p = cur;
Int *pint = (Int *) cur, temp = 0;
MB_mean = 0;
A = 0;
Th = (min_SAD - PREF_INTRA) >> 1;
for (j = 0; j < 8; j++)
{
/* Odd Rows */
temp += (*pint++) & 0x00FF00FF;
temp += (*pint++) & 0x00FF00FF;
temp += (*pint++) & 0x00FF00FF;
temp += (*pint++) & 0x00FF00FF;
pint += offset;
/* Even Rows */
temp += (*pint++ >> 8) & 0x00FF00FF;
temp += (*pint++ >> 8) & 0x00FF00FF;
temp += (*pint++ >> 8) & 0x00FF00FF;
temp += (*pint++ >> 8) & 0x00FF00FF;
pint += offset;
}
MB_mean = (((temp & 0x0000FFFF)) + ((temp & 0xFFFF0000) >> 16)) >> 7;
p = cur;
offset = lx - 16;
for (j = 0; j < 16; j++)
{
temp = (j & 1);
p += temp;
i = 8;
while (i--)
{
tmp = *p - MB_mean;
p += 2;
if (tmp > 0) A += tmp;
else A -= tmp;
}
if (A >= Th)
{
*Mode = MODE_INTER;
return ;
}
p += (offset - temp);
}
if (A < Th)
*Mode = MODE_INTRA;
else
*Mode = MODE_INTER;
return ;
}
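/*
 * Illustrative sketch (not part of the original source): ChooseMode_C
 * above is a word-parallel version of the straightforward decision
 * below.  It takes the mean of a 128-pixel checkerboard subsample of
 * the 16x16 luma block, accumulates the sum of absolute deviations A
 * of the same samples, and picks INTRA only when A stays below
 * Th = (min_SAD - PREF_INTRA)/2 (the optimized version also exits
 * early as soon as A reaches Th).
 */
static void ChooseModeReference_sketch(UChar *Mode, UChar *cur, Int lx, Int min_SAD)
{
    Int i, j, mean = 0, A = 0;
    Int Th = (min_SAD - PREF_INTRA) >> 1;

    for (j = 0; j < 16; j++)                 /* checkerboard mean, (i+j) even */
        for (i = (j & 1); i < 16; i += 2)
            mean += cur[j*lx + i];
    mean >>= 7;                              /* 128 samples */

    for (j = 0; j < 16; j++)                 /* first-order moment vs. the mean */
        for (i = (j & 1); i < 16; i += 2)
            A += PV_ABS(cur[j*lx + i] - mean);

    *Mode = (A < Th) ? MODE_INTRA : MODE_INTER;
}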
/*===============================================================
Function: GetHalfPelMBRegion
Date: 09/17/2000
Purpose: Interpolate the search region for half-pel search
Input/Output: Center of the search, Half-pel memory, width
Note: rounding type should be parameterized;
it is currently fixed to zero.
===============================================================*/
void GetHalfPelMBRegion_C(UChar *cand, UChar *hmem, Int lx)
{
Int i, j;
UChar *p1, *p2, *p3, *p4;
UChar *hmem1 = hmem;
UChar *hmem2 = hmem1 + 33;
Int offset = lx - 17;
p1 = cand - lx - 1;
p2 = cand - lx;
p3 = cand - 1;
p4 = cand;
for (j = 0; j < 16; j++)
{
for (i = 0; i < 16; i++)
{
*hmem1++ = ((*p1++) + *p2 + *p3 + *p4 + 2) >> 2;
*hmem1++ = ((*p2++) + *p4 + 1) >> 1;
*hmem2++ = ((*p3++) + *p4 + 1) >> 1;
*hmem2++ = *p4++;
}
/* last pixel */
*hmem1++ = ((*p1++) + (*p2++) + *p3 + *p4 + 2) >> 2;
*hmem2++ = ((*p3++) + (*p4++) + 1) >> 1;
hmem1 += 33;
hmem2 += 33;
p1 += offset;
p2 += offset;
p3 += offset;
p4 += offset;
}
/* last row */
for (i = 0; i < 16; i++)
{
*hmem1++ = ((*p1++) + *p2 + (*p3++) + *p4 + 2) >> 2;
*hmem1++ = ((*p2++) + (*p4++) + 1) >> 1;
}
*hmem1 = (*p1 + *p2 + *p3 + *p4 + 2) >> 2;
return ;
}
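/*
 * Illustrative sketch (not part of the original source): the 33x33
 * half-pel buffer filled above interleaves, for every integer pixel p4
 * with neighbours p1 (up-left), p2 (up) and p3 (left), the four values
 * computed below.  Rows written through hmem1 hold the diagonal and
 * vertical half-pel samples; rows written through hmem2 hold the
 * horizontal half-pel and integer samples.  Rounding control is fixed
 * to zero, matching the note in the function header.
 */
static void HalfPelCell_sketch(Int p1, Int p2, Int p3, Int p4,
                               UChar *diag, UChar *vert, UChar *horz, UChar *full)
{
    *diag = (UChar)((p1 + p2 + p3 + p4 + 2) >> 2);  /* (x-1/2, y-1/2) */
    *vert = (UChar)((p2 + p4 + 1) >> 1);            /* (x,     y-1/2) */
    *horz = (UChar)((p3 + p4 + 1) >> 1);            /* (x-1/2, y)     */
    *full = (UChar)p4;                              /* (x,     y)     */
}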
/*===============================================================
Function: GetHalfPelBlkRegion
Date: 09/20/2000
Purpose: Interpolate the search region for half-pel search
in 4MV mode.
Input/Output: Center of the search, Half-pel memory, width
Note: rounding type should be parameterized;
it is currently fixed to zero.
===============================================================*/
void GetHalfPelBlkRegion(UChar *cand, UChar *hmem, Int lx)
{
Int i, j;
UChar *p1, *p2, *p3, *p4;
UChar *hmem1 = hmem;
UChar *hmem2 = hmem1 + 17;
Int offset = lx - 9;
p1 = cand - lx - 1;
p2 = cand - lx;
p3 = cand - 1;
p4 = cand;
for (j = 0; j < 8; j++)
{
for (i = 0; i < 8; i++)
{
*hmem1++ = ((*p1++) + *p2 + *p3 + *p4 + 2) >> 2;
*hmem1++ = ((*p2++) + *p4 + 1) >> 1;
*hmem2++ = ((*p3++) + *p4 + 1) >> 1;
*hmem2++ = *p4++;
}
/* last pixel */
*hmem1++ = ((*p1++) + (*p2++) + *p3 + *p4 + 2) >> 2;
*hmem2++ = ((*p3++) + (*p4++) + 1) >> 1;
hmem1 += 17;
hmem2 += 17;
p1 += offset;
p2 += offset;
p3 += offset;
p4 += offset;
}
/* last row */
for (i = 0; i < 8; i++)
{
*hmem1++ = ((*p1++) + *p2 + (*p3++) + *p4 + 2) >> 2;
*hmem1++ = ((*p2++) + (*p4++) + 1) >> 1;
}
*hmem1 = (*p1 + *p2 + *p3 + *p4 + 2) >> 2;
return ;
}
/*=====================================================================
Function: PaddingEdge
Date: 09/16/2000
Purpose: Pad edge of a Vop
Modification: 09/20/05.
=====================================================================*/
void PaddingEdge(Vop *refVop)
{
UChar *src, *dst;
Int i;
Int pitch, width, height;
ULong temp1, temp2;
width = refVop->width;
height = refVop->height;
pitch = refVop->pitch;
/* pad top */
src = refVop->yChan;
temp1 = *src; /* top-left corner */
temp2 = src[width-1]; /* top-right corner */
temp1 |= (temp1 << 8);
temp1 |= (temp1 << 16);
temp2 |= (temp2 << 8);
temp2 |= (temp2 << 16);
dst = src - (pitch << 4);
*((ULong*)(dst - 16)) = temp1;
*((ULong*)(dst - 12)) = temp1;
*((ULong*)(dst - 8)) = temp1;
*((ULong*)(dst - 4)) = temp1;
M4VENC_MEMCPY(dst, src, width);
*((ULong*)(dst += width)) = temp2;
*((ULong*)(dst + 4)) = temp2;
*((ULong*)(dst + 8)) = temp2;
*((ULong*)(dst + 12)) = temp2;
dst = dst - width - 16;
i = 15;
while (i--)
{
M4VENC_MEMCPY(dst + pitch, dst, pitch);
dst += pitch;
}
/* pad sides */
dst += (pitch + 16);
src = dst;
i = height;
while (i--)
{
temp1 = *src;
temp2 = src[width-1];
temp1 |= (temp1 << 8);
temp1 |= (temp1 << 16);
temp2 |= (temp2 << 8);
temp2 |= (temp2 << 16);
*((ULong*)(dst - 16)) = temp1;
*((ULong*)(dst - 12)) = temp1;
*((ULong*)(dst - 8)) = temp1;
*((ULong*)(dst - 4)) = temp1;
*((ULong*)(dst += width)) = temp2;
*((ULong*)(dst + 4)) = temp2;
*((ULong*)(dst + 8)) = temp2;
*((ULong*)(dst + 12)) = temp2;
src += pitch;
dst = src;
}
/* pad bottom */
dst -= 16;
i = 16;
while (i--)
{
M4VENC_MEMCPY(dst, dst - pitch, pitch);
dst += pitch;
}
return ;
}
/*===================================================================
Function: ComputeMBSum
Date: 10/28/2000
Purpose: Compute the sum of absolute values (SAV) of the blocks in a
macroblock in INTRA mode, needed for rate control. Instead of
the SAV, a first-order moment or variance could also be used.
11/28/00: add MMX
9/3/01: do parallel comp for C function.
===================================================================*/
void ComputeMBSum_C(UChar *cur, Int lx, MOT *mot_mb)
{
Int j;
Int *cInt, *cInt2;
Int sad1 = 0, sad2 = 0, sad3 = 0, sad4 = 0;
Int tmp, tmp2, mask = 0x00FF00FF;
cInt = (Int*)cur; /* make sure this is word-align */
cInt2 = (Int*)(cur + (lx << 3));
j = 8;
while (j--)
{
tmp = cInt[3]; /* load 4 pixels at a time */
tmp2 = tmp & mask;
tmp = (tmp >> 8) & mask;
tmp += tmp2;
sad2 += tmp;
tmp = cInt[2];
tmp2 = tmp & mask;
tmp = (tmp >> 8) & mask;
tmp += tmp2;
sad2 += tmp;
tmp = cInt[1];
tmp2 = tmp & mask;
tmp = (tmp >> 8) & mask;
tmp += tmp2;
sad1 += tmp;
tmp = *cInt;
cInt += (lx >> 2);
tmp2 = tmp & mask;
tmp = (tmp >> 8) & mask;
tmp += tmp2;
sad1 += tmp;
tmp = cInt2[3];
tmp2 = tmp & mask;
tmp = (tmp >> 8) & mask;
tmp += tmp2;
sad4 += tmp;
tmp = cInt2[2];
tmp2 = tmp & mask;
tmp = (tmp >> 8) & mask;
tmp += tmp2;
sad4 += tmp;
tmp = cInt2[1];
tmp2 = tmp & mask;
tmp = (tmp >> 8) & mask;
tmp += tmp2;
sad3 += tmp;
tmp = *cInt2;
cInt2 += (lx >> 2);
tmp2 = tmp & mask;
tmp = (tmp >> 8) & mask;
tmp += tmp2;
sad3 += tmp;
}
sad1 += (sad1 << 16);
sad2 += (sad2 << 16);
sad3 += (sad3 << 16);
sad4 += (sad4 << 16);
sad1 >>= 16;
sad2 >>= 16;
sad3 >>= 16;
sad4 >>= 16;
mot_mb[1].sad = sad1;
mot_mb[2].sad = sad2;
mot_mb[3].sad = sad3;
mot_mb[4].sad = sad4;
mot_mb[0].sad = sad1 + sad2 + sad3 + sad4;
return ;
}
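/*
 * Illustrative sketch (not part of the original source): ComputeMBSum_C
 * above keeps two 16-bit partial sums per 32-bit accumulator (the mask
 * 0x00FF00FF picks bytes 0/2 of each word, the shifted load bytes 1/3)
 * and folds them at the end with "sad += sad << 16; sad >>= 16".
 * Functionally, each of mot_mb[1..4].sad is the plain pixel sum of one
 * 8x8 block, as computed below, and mot_mb[0].sad is their total.
 */
static Int BlockPixelSum_sketch(UChar *blk, Int lx)
{
    Int i, j, sum = 0;
    for (j = 0; j < 8; j++)
        for (i = 0; i < 8; i++)
            sum += blk[j*lx + i];
    return sum;
}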

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -0,0 +1,228 @@
/* ------------------------------------------------------------------
* Copyright (C) 1998-2009 PacketVideo
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied.
* See the License for the specific language governing permissions
* and limitations under the License.
* -------------------------------------------------------------------
*/
#ifndef _PVDECDEF_H_
#define _PVDECDEF_H_
#include <stdlib.h>
#include <string.h>
// Redefine the int types
typedef uint8_t uint8;
typedef uint16_t uint16;
typedef int16_t int16;
typedef uint32_t uint32;
typedef int32_t int32;
typedef unsigned int uint;
/********** platform dependent in-line assembly *****************************/
/*************** Intel *****************/
/*************** ARM *****************/
/* for general ARM instruction. #define __ARM has to be defined in compiler set up.*/
/* for DSP MUL */
#ifdef __TARGET_FEATURE_DSPMUL
#define _ARM_DSP_MUL
#endif
/* for Count Leading Zero instruction */
#ifdef __TARGET_ARCH_5T
#define _ARM_CLZ
#endif
#ifdef __TARGET_ARCH_5TE
#define _ARM_CLZ
#endif
/****************************************************************************/
#ifndef _PV_TYPES_
#define _PV_TYPES_
typedef unsigned char UChar;
typedef char Char;
typedef unsigned int UInt;
typedef int Int;
typedef unsigned short UShort;
typedef short Short;
typedef short int SInt;
typedef unsigned int Bool;
typedef unsigned long ULong;
typedef void Void;
#define PV_CODEC_INIT 0
#define PV_CODEC_STOP 1
#define PV_CODEC_RUNNING 2
#define PV_CODEC_RESET 3
#endif
typedef enum
{
PV_SUCCESS,
PV_FAIL,
PV_EOS, /* hit End_Of_Sequence */
PV_MB_STUFFING, /* hit Macroblock_Stuffing */
PV_END_OF_VOP, /* hit End_of_Video_Object_Plane */
PV_END_OF_MB, /* hit End_of_Macroblock */
PV_END_OF_BUF /* hit End_of_Bitstream_Buffer */
} PV_STATUS;
typedef UChar PIXEL;
//typedef Int MOT; /* : "int" type runs faster on RISC machine */
#define HTFM /* 3/2/01, Hypothesis Test Fast Matching for early drop-out*/
//#define _MOVE_INTERFACE
//#define RANDOM_REFSELCODE
/* handle the case of division by zero in RC */
#define MAD_MIN 1
/* 4/11/01, if SSE or MMX, no HTFM, no SAD_HP_FLY */
/* Code size reduction related Macros */
#ifdef H263_ONLY
#ifndef NO_RVLC
#define NO_RVLC
#endif
#ifndef NO_MPEG_QUANT
#define NO_MPEG_QUANT
#endif
#ifndef NO_INTER4V
#define NO_INTER4V
#endif
#endif
/**************************************/
#define TRUE 1
#define FALSE 0
#define PV_ABS(x) (((x)<0)? -(x) : (x))
#define PV_SIGN(x) (((x)<0)? -1 : 1)
#define PV_SIGN0(a) (((a)<0)? -1 : (((a)>0) ? 1 : 0))
#define PV_MAX(a,b) ((a)>(b)? (a):(b))
#define PV_MIN(a,b) ((a)<(b)? (a):(b))
#define MODE_INTRA 0
#define MODE_INTER 1
#define MODE_INTRA_Q 2
#define MODE_INTER_Q 3
#define MODE_INTER4V 4
#define MODE_SKIPPED 6
#define I_VOP 0
#define P_VOP 1
#define B_VOP 2
/*09/04/00 Add MB height and width */
#define MB_WIDTH 16
#define MB_HEIGHT 16
#define VOP_BRIGHT_WHITEENC 255
#define LUMINANCE_DC_TYPE 1
#define CHROMINANCE_DC_TYPE 2
#define EOB_CODE 1
#define EOB_CODE_LENGTH 32
/* 11/30/98 */
#define FoundRM 1 /* Resync Marker */
#define FoundVSC 2 /* VOP_START_CODE. */
#define FoundGSC 3 /* GROUP_START_CODE */
#define FoundEOB 4 /* EOB_CODE */
/* 05/08/2000, the error code returned from BitstreamShowBits() */
#define BITSTREAM_ERROR_CODE 0xFFFFFFFF
/* PacketVideo "absolute timestamp" object. 06/13/2000 */
#define PVTS_START_CODE 0x01C4
#define PVTS_START_CODE_LENGTH 32
/* session layer and vop layer start codes */
#define SESSION_START_CODE 0x01B0
#define SESSION_END_CODE 0x01B1
#define VISUAL_OBJECT_START_CODE 0x01B5
#define VO_START_CODE 0x8
#define VO_HEADER_LENGTH 32 /* length of VO header: VO_START_CODE + VO_ID */
#define SOL_START_CODE 0x01BE
#define SOL_START_CODE_LENGTH 32
#define VOL_START_CODE 0x12
#define VOL_START_CODE_LENGTH 28
#define VOP_START_CODE 0x1B6
#define VOP_START_CODE_LENGTH 32
#define GROUP_START_CODE 0x01B3
#define GROUP_START_CODE_LENGTH 32
#define VOP_ID_CODE_LENGTH 5
#define VOP_TEMP_REF_CODE_LENGTH 16
#define USER_DATA_START_CODE 0x01B2
#define USER_DATA_START_CODE_LENGTH 32
#define START_CODE_PREFIX 0x01
#define START_CODE_PREFIX_LENGTH 24
#define SHORT_VIDEO_START_MARKER 0x20
#define SHORT_VIDEO_START_MARKER_LENGTH 22
#define SHORT_VIDEO_END_MARKER 0x3F
#define GOB_RESYNC_MARKER 0x01
#define GOB_RESYNC_MARKER_LENGTH 17
/* motion and resync markers used in error resilient mode */
#define DC_MARKER 438273
#define DC_MARKER_LENGTH 19
#define MOTION_MARKER_COMB 126977
#define MOTION_MARKER_COMB_LENGTH 17
#define MOTION_MARKER_SEP 81921
#define MOTION_MARKER_SEP_LENGTH 17
#define RESYNC_MARKER 1
#define RESYNC_MARKER_LENGTH 17
#define SPRITE_NOT_USED 0
#define STATIC_SPRITE 1
#define ONLINE_SPRITE 2
#define GMC_SPRITE 3
/* macroblock and block size */
#define MB_SIZE 16
#define NCOEFF_MB (MB_SIZE*MB_SIZE)
#define B_SIZE 8
#define NCOEFF_BLOCK (B_SIZE*B_SIZE)
#define NCOEFF_Y NCOEFF_MB
#define NCOEFF_U NCOEFF_BLOCK
#define NCOEFF_V NCOEFF_BLOCK
/* overrun buffer size */
#define DEFAULT_OVERRUN_BUFFER_SIZE 1000
/* VLC decoding related definitions */
#define VLC_ERROR (-1)
#define VLC_ESCAPE 7167
#endif /* _PVDECDEF_H_ */

File diff suppressed because it is too large

View File

@ -0,0 +1,207 @@
/* ------------------------------------------------------------------
* Copyright (C) 1998-2009 PacketVideo
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied.
* See the License for the specific language governing permissions
* and limitations under the License.
* -------------------------------------------------------------------
*/
#ifndef _MP4ENC_LIB_H_
#define _MP4ENC_LIB_H_
#include "mp4def.h" // typedef
#include "mp4lib_int.h" // main video structure
#ifdef __cplusplus
extern "C"
{
#endif
/* defined in vop.c */
PV_STATUS EncodeVop(VideoEncData *video);
PV_STATUS EncodeSlice(VideoEncData *video);
PV_STATUS EncodeVideoPacketHeader(VideoEncData *video, int MB_number,
int quant_scale, Int insert);
#ifdef ALLOW_VOP_NOT_CODED
PV_STATUS EncodeVopNotCoded(VideoEncData *video, UChar *bstream, Int *size, ULong modTime);
#endif
/* defined in combined_decode.c */
PV_STATUS EncodeFrameCombinedMode(VideoEncData *video);
PV_STATUS EncodeSliceCombinedMode(VideoEncData *video);
/* defined in datapart_decode.c */
PV_STATUS EncodeFrameDataPartMode(VideoEncData *video);
PV_STATUS EncodeSliceDataPartMode(VideoEncData *video);
/* defined in fastcodeMB.c */
//void m4v_memset(void *adr_dst, uint8 value, uint32 size);
PV_STATUS CodeMB_H263(VideoEncData *video, approxDCT *function, Int offsetQP, Int ncoefblck[]);
#ifndef NO_MPEG_QUANT
PV_STATUS CodeMB_MPEG(VideoEncData *video, approxDCT *function, Int offsetQP, Int ncoefblck[]);
#endif
Int getBlockSAV(Short block[]);
Int Sad8x8(UChar *rec, UChar *prev, Int lx);
Int getBlockSum(UChar *rec, Int lx);
/* defined in dct.c */
void blockIdct(Short *block);
void blockIdct_SSE(Short *input);
void BlockDCTEnc(Short *blockData, Short *blockCoeff);
/*---- FastQuant.c -----*/
Int cal_dc_scalerENC(Int QP, Int type) ;
Int BlockQuantDequantH263Inter(Short *rcoeff, Short *qcoeff, struct QPstruct *QuantParam,
UChar bitmapcol[ ], UChar *bitmaprow, UInt *bitmapzz,
Int dctMode, Int comp, Int dummy, UChar shortHeader);
Int BlockQuantDequantH263Intra(Short *rcoeff, Short *qcoeff, struct QPstruct *QuantParam,
UChar bitmapcol[ ], UChar *bitmaprow, UInt *bitmapzz,
Int dctMode, Int comp, Int dc_scaler, UChar shortHeader);
Int BlockQuantDequantH263DCInter(Short *rcoeff, Short *qcoeff, struct QPstruct *QuantParam,
UChar *bitmaprow, UInt *bitmapzz, Int dummy, UChar shortHeader);
Int BlockQuantDequantH263DCIntra(Short *rcoeff, Short *qcoeff, struct QPstruct *QuantParam,
UChar *bitmaprow, UInt *bitmapzz, Int dc_scaler, UChar shortHeader);
#ifndef NO_MPEG_QUANT
Int BlockQuantDequantMPEGInter(Short *rcoeff, Short *qcoeff, Int QP, Int *qmat,
UChar bitmapcol[ ], UChar *bitmaprow, UInt *bitmapzz,
Int DctMode, Int comp, Int dc_scaler);
Int BlockQuantDequantMPEGIntra(Short *rcoeff, Short *qcoeff, Int QP, Int *qmat,
UChar bitmapcol[ ], UChar *bitmaprow, UInt *bitmapzz,
Int DctMode, Int comp, Int dc_scaler);
Int BlockQuantDequantMPEGDCInter(Short *rcoeff, Short *qcoeff, Int QP, Int *qmat,
UChar bitmapcol[ ], UChar *bitmaprow, UInt *bitmapzz, Int dummy);
Int BlockQuantDequantMPEGDCIntra(Short *rcoeff, Short *qcoeff, Int QP, Int *qmat,
UChar bitmapcol[ ], UChar *bitmaprow, UInt *bitmapzz, Int dc_scaler);
#endif
/*---- FastIDCT.c -----*/
void BlockIDCTMotionComp(Short *block, UChar *bitmapcol, UChar bitmaprow,
Int dctMode, UChar *rec, UChar *prev, Int lx_intra_zeroMV);
/* defined in motion_comp.c */
void getMotionCompensatedMB(VideoEncData *video, Int ind_x, Int ind_y, Int offset);
void EncPrediction_INTER(Int xpred, Int ypred, UChar *c_prev, UChar *c_rec,
Int width, Int round1);
void EncPrediction_INTER4V(Int xpred, Int ypred, MOT *mot, UChar *c_prev, UChar *c_rec,
Int width, Int round1);
void EncPrediction_Chrom(Int xpred, Int ypred, UChar *cu_prev, UChar *cv_prev, UChar *cu_rec,
UChar *cv_rec, Int pitch_uv, Int width_uv, Int height_uv, Int round1);
void get_MB(UChar *c_prev, UChar *c_prev_u , UChar *c_prev_v,
Short mb[6][64], Int width, Int width_uv);
void PutSkippedBlock(UChar *rec, UChar *prev, Int lx);
/* defined in motion_est.c */
void MotionEstimation(VideoEncData *video);
#ifdef HTFM
void InitHTFM(VideoEncData *video, HTFM_Stat *htfm_stat, double *newvar, Int *collect);
void UpdateHTFM(VideoEncData *video, double *newvar, double *exp_lamda, HTFM_Stat *htfm_stat);
#endif
/* defined in ME_utils.c */
void ChooseMode_C(UChar *Mode, UChar *cur, Int lx, Int min_SAD);
void ChooseMode_MMX(UChar *Mode, UChar *cur, Int lx, Int min_SAD);
void GetHalfPelMBRegion_C(UChar *cand, UChar *hmem, Int lx);
void GetHalfPelMBRegion_SSE(UChar *cand, UChar *hmem, Int lx);
void GetHalfPelBlkRegion(UChar *cand, UChar *hmem, Int lx);
void PaddingEdge(Vop *padVop);
void ComputeMBSum_C(UChar *cur, Int lx, MOT *mot_mb);
void ComputeMBSum_MMX(UChar *cur, Int lx, MOT *mot_mb);
void ComputeMBSum_SSE(UChar *cur, Int lx, MOT *mot_mb);
void GetHalfPelMBRegionPadding(UChar *ncand, UChar *hmem, Int lx, Int *reptl);
void GetHalfPelBlkRegionPadding(UChar *ncand, UChar *hmem, Int lx, Int *reptl);
/* defined in findhalfpel.c */
void FindHalfPelMB(VideoEncData *video, UChar *cur, MOT *mot, UChar *ncand,
Int xpos, Int ypos, Int *xhmin, Int *yhmin, Int hp_guess);
Int FindHalfPelBlk(VideoEncData *video, UChar *cur, MOT *mot, Int sad16, UChar *ncand8[],
UChar *mode, Int xpos, Int ypos, Int *xhmin, Int *yhmin, UChar *hp_mem);
/* defined in sad.c */
Int SAD_MB_HalfPel_Cxhyh(UChar *ref, UChar *blk, Int dmin_lx, void *extra_info);
Int SAD_MB_HalfPel_Cyh(UChar *ref, UChar *blk, Int dmin_lx, void *extra_info);
Int SAD_MB_HalfPel_Cxh(UChar *ref, UChar *blk, Int dmin_lx, void *extra_info);
Int SAD_MB_HalfPel_MMX(UChar *ref, UChar *blk, Int dmin_lx, void *extra_info);
Int SAD_MB_HalfPel_SSE(UChar *ref, UChar *blk, Int dmin_lx, void *extra_info);
Int SAD_Blk_HalfPel_C(UChar *ref, UChar *blk, Int dmin, Int lx, Int rx, Int xh, Int yh, void *extra_info);
Int SAD_Blk_HalfPel_MMX(UChar *ref, UChar *blk, Int dmin, Int lx, void *extra_info);
Int SAD_Blk_HalfPel_SSE(UChar *ref, UChar *blk, Int dmin, Int lx, void *extra_info);
Int SAD_Macroblock_C(UChar *ref, UChar *blk, Int dmin_lx, void *extra_info);
Int SAD_Macroblock_MMX(UChar *ref, UChar *blk, Int dmin_lx, void *extra_info);
Int SAD_Macroblock_SSE(UChar *ref, UChar *blk, Int dmin_lx, void *extra_info);
Int SAD_Block_C(UChar *ref, UChar *blk, Int dmin, Int lx, void *extra_info);
Int SAD_Block_MMX(UChar *ref, UChar *blk, Int dmin, Int lx, void *extra_info);
Int SAD_Block_SSE(UChar *ref, UChar *blk, Int dmin, Int lx, void *extra_info);
#ifdef HTFM /* Hypothesis Testing Fast Matching */
Int SAD_MB_HP_HTFM_Collectxhyh(UChar *ref, UChar *blk, Int dmin_x, void *extra_info);
Int SAD_MB_HP_HTFM_Collectyh(UChar *ref, UChar *blk, Int dmin_x, void *extra_info);
Int SAD_MB_HP_HTFM_Collectxh(UChar *ref, UChar *blk, Int dmin_x, void *extra_info);
Int SAD_MB_HP_HTFMxhyh(UChar *ref, UChar *blk, Int dmin_lx, void *extra_info);
Int SAD_MB_HP_HTFMyh(UChar *ref, UChar *blk, Int dmin_lx, void *extra_info);
Int SAD_MB_HP_HTFMxh(UChar *ref, UChar *blk, Int dmin_lx, void *extra_info);
Int SAD_MB_HTFM_Collect(UChar *ref, UChar *blk, Int dmin_lx, void *extra_info);
Int SAD_MB_HTFM(UChar *ref, UChar *blk, Int dmin_lx, void *extra_info);
#endif
/* on-the-fly padding */
Int SAD_Blk_PADDING(UChar *ref, UChar *cur, Int dmin, Int lx, void *extra_info);
Int SAD_MB_PADDING(UChar *ref, UChar *cur, Int dmin, Int lx, void *extra_info);
#ifdef HTFM
Int SAD_MB_PADDING_HTFM_Collect(UChar *ref, UChar *blk, Int dmin, Int lx, void *extra_info);
Int SAD_MB_PADDING_HTFM(UChar *ref, UChar *blk, Int dmin, Int lx, void *extra_info);
#endif
/* defined in rate_control.c */
/* These are APIs to rate control exposed to core encoder module. */
PV_STATUS RC_Initialize(void *video);
PV_STATUS RC_VopQPSetting(VideoEncData *video, rateControl *rc[]);
PV_STATUS RC_VopUpdateStat(VideoEncData *video, rateControl *rc);
PV_STATUS RC_MBQPSetting(VideoEncData *video, rateControl *rc, Int start_packet_header);
PV_STATUS RC_MBUpdateStat(VideoEncData *video, rateControl *rc, Int Bi, Int Hi);
PV_STATUS RC_Cleanup(rateControl *rc[], Int numLayers);
Int RC_GetSkipNextFrame(VideoEncData *video, Int currLayer);
Int RC_GetRemainingVops(VideoEncData *video, Int currLayer);
void RC_ResetSkipNextFrame(VideoEncData *video, Int currLayer);
PV_STATUS RC_UpdateBuffer(VideoEncData *video, Int currLayer, Int num_skip);
PV_STATUS RC_UpdateBXRCParams(void *input);
/* defined in vlc_encode.c */
void MBVlcEncodeDataPar_I_VOP(VideoEncData *video, Int ncoefblck[], void *blkCodePtr);
void MBVlcEncodeDataPar_P_VOP(VideoEncData *video, Int ncoefblck[], void *blkCodePtr);
void MBVlcEncodeCombined_I_VOP(VideoEncData *video, Int ncoefblck[], void *blkCodePtr);
void MBVlcEncodeCombined_P_VOP(VideoEncData *video, Int ncoefblck[], void *blkCodePtr);
void BlockCodeCoeff_ShortHeader(RunLevelBlock *RLB, BitstreamEncVideo *bs, Int j_start, Int j_stop, UChar Mode);
void BlockCodeCoeff_RVLC(RunLevelBlock *RLB, BitstreamEncVideo *bs, Int j_start, Int j_stop, UChar Mode);
void BlockCodeCoeff_Normal(RunLevelBlock *RLB, BitstreamEncVideo *bs, Int j_start, Int j_stop, UChar Mode);
#ifdef __cplusplus
}
#endif
#endif /* _MP4ENC_LIB_H_ */

View File

@ -0,0 +1,472 @@
/* ------------------------------------------------------------------
* Copyright (C) 1998-2009 PacketVideo
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied.
* See the License for the specific language governing permissions
* and limitations under the License.
* -------------------------------------------------------------------
*/
#ifndef _MP4LIB_INT_H_
#define _MP4LIB_INT_H_
#include "mp4def.h"
#include "mp4enc_api.h"
#include "rate_control.h"
/* BitstreamEncVideo will be modified */
typedef struct tagBitstream
{
Int(*writeVideoPacket)(UChar *buf, Int nbytes_required); /*write video packet out */
UChar *bitstreamBuffer; /*buffer to hold one video packet*/
Int bufferSize; /*total bitstream buffer size in bytes */
Int byteCount; /*how many bytes already encoded*/
UInt word; /*hold one word temporarily */
Int bitLeft; /*number of bits left in "word" */
UChar* overrunBuffer; /* pointer to overrun buffer */
Int oBSize; /* length of overrun buffer */
struct tagVideoEncData *video;
} BitstreamEncVideo;
typedef struct tagVOP
{
PIXEL *yChan; /* The Y component */
PIXEL *uChan; /* The U component */
PIXEL *vChan; /* The V component */
Int frame; /* frame number */
Int volID; /* Layer number */
//Int timeStamp; /* Vop TimeStamp in msec */
/* Syntax elements copied from VOL (standard) */
Int width; /* Width (multiple of 16) */
Int height; /* Height (multiple of 16) */
Int pitch; /* Pitch (differs from width for UMV case) */
Int padded; /* flag whether this frame has been padded */
/* Actual syntax elements for VOP (standard) */
Int predictionType; /* VOP prediction type */
Int timeInc; /* VOP time increment (relative to last mtb) */
Int vopCoded;
Int roundingType;
Int intraDCVlcThr;
Int quantizer; /* VOP quantizer */
Int fcodeForward; /* VOP dynamic range of motion vectors */
Int fcodeBackward; /* VOP dynamic range of motion vectors */
Int refSelectCode; /* enhancement layer reference select code */
/* H.263 parameters */
Int gobNumber;
Int gobFrameID;
Int temporalRef; /* temporal reference, rolls over at 256 */
Int temporalInterval; /* increases every 256 temporalRef */
} Vop;
typedef struct tagVol
{
Int volID; /* VOL identifier (for tracking) */
Int shortVideoHeader; /* shortVideoHeader mode */
Int GOVStart; /* Insert GOV Header */
Int timeIncrementResolution; /* VOL time increment */
Int nbitsTimeIncRes; /* number of bits for time increment */
Int timeIncrement; /* time increment */
Int moduloTimeBase; /* internal decoder clock */
Int prevModuloTimeBase; /* in case of pre-frameskip */
Int fixedVopRate;
BitstreamEncVideo *stream; /* library bitstream buffer (input buffer) */
/* VOL Dimensions */
Int width; /* Width */
Int height; /* Height */
/* Error Resilience Flags */
Int ResyncMarkerDisable; /* VOL Disable Resynch Markers */
Int useReverseVLC; /* VOL reversible VLCs */
Int dataPartitioning; /* VOL data partitioning */
/* Quantization related parameters */
Int quantPrecision; /* Quantizer precision */
Int quantType; /* MPEG-4 or H.263 Quantization Type */
/* Added loaded quant mat, 05/22/2000 */
Int loadIntraQuantMat; /* Load intra quantization matrix */
Int loadNonIntraQuantMat; /* Load nonintra quantization matrix */
Int iqmat[64]; /* Intra quant.matrix */
Int niqmat[64]; /* Non-intra quant.matrix */
/* Parameters used for scalability */
Int scalability; /* VOL scalability (flag) */
Int scalType; /* temporal = 0, spatial = 1, both = 2 */
Int refVolID; /* VOL id of reference VOL */
Int refSampDir; /* VOL resol. of ref. VOL */
Int horSamp_n; /* VOL hor. resampling of ref. VOL given by */
Int horSamp_m; /* sampfac = hor_samp_n/hor_samp_m */
Int verSamp_n; /* VOL ver. resampling of ref. VOL given by */
Int verSamp_m; /* sampfac = ver_samp_n/ver_samp_m */
Int enhancementType; /* VOL type of enhancement layer */
/* These variables were added since they are used a lot. */
Int nMBPerRow, nMBPerCol; /* number of MBs in each row & column */
Int nTotalMB;
Int nBitsForMBID; /* how many bits required for MB number? */
/* for short video header */
Int nMBinGOB; /* number of MBs in GOB, 05/22/00 */
Int nGOBinVop; /* number of GOB in Vop 05/22/00 */
} Vol;
typedef struct tagMacroBlock
{
Int mb_x; /* X coordinate */
Int mb_y; /* Y coordinate */
Short block[9][64]; /* 4-Y, U and V blocks , and AAN Scale*/
} MacroBlock;
typedef struct tagRunLevelBlock
{
Int run[64]; /* Runlength */
Int level[64]; /* Abs(level) */
Int s[64]; /* sign level */
} RunLevelBlock;
typedef struct tagHeaderInfoDecVideo
{
UChar *Mode; /* Modes INTRA/INTER/etc. */
UChar *CBP; /* MCBPC/CBPY stuff */
} HeaderInfoEncVideo;
typedef Short typeDCStore[6]; /* ACDC */
typedef Short typeDCACStore[4][8];
typedef struct tagMOT
{
Int x; /* half-pel resolution x component */
Int y; /* half-pel resolution y component */
Int sad; /* SAD */
} MOT;
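/*
 * Illustrative note (not part of the original source): MOT keeps its x/y
 * components in half-pel units -- FindHalfPelMB() in findhalfpel.cpp
 * takes mot[].x >> 1 as the full-pel offset and then adds the half-pel
 * refinement directly.  Splitting such a component into its integer and
 * half-pel parts:
 *
 *     Int full = mv_x >> 1;   // integer-pel part (arithmetic shift)
 *     Int half = mv_x & 1;    // 1 when a half-pel offset is present
 */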
typedef struct tagHintTrackInfo
{
UChar MTB;
UChar LayerID;
UChar CodeType;
UChar RefSelCode;
} HintTrackInfo;
typedef struct tagVideoEncParams
{
//Int Width; /* Input Width */
//Int Height; /* Input Height */
//float FrameRate; /* Input Frame Rate */
UInt TimeIncrementRes; /* timeIncrementRes */
/*VOL Parameters */
Int nLayers;
Int LayerWidth[4]; /* Encoded Width */
Int LayerHeight[4]; /* Encoded Height */
float LayerFrameRate[4]; /* Encoded Frame Rate */
Int LayerBitRate[4]; /* Encoded BitRate */
Int LayerMaxBitRate[4]; /* Maximum Encoded BitRate */
float LayerMaxFrameRate[4]; /* Maximum Encoded Frame Rate */
Int LayerMaxMbsPerSec[4]; /* Maximum mbs per second, according to the specified profile and level */
Int LayerMaxBufferSize[4]; /* Maximum buffer size, according to the specified profile and level */
Bool ResyncMarkerDisable; /* Disable Resync Marker */
Bool DataPartitioning; /* Base Layer Data Partitioning */
Bool ReversibleVLC; /* RVLC when Data Partitioning */
Bool ACDCPrediction; /* AC/DC Prediction */
Int QuantType[4]; /* H263, MPEG2 */
Int InitQuantBvop[4];
Int InitQuantPvop[4];
Int InitQuantIvop[4];
Int ResyncPacketsize;
Int RoundingType;
Int IntraDCVlcThr;
/* Rate Control Parameters */
MP4RateControlType RC_Type; /*Constant Q, M4 constantRate, VM5+, M4RC,MPEG2TM5 */
/* Intra Refresh Parameters */
Int IntraPeriod; /* Intra update period */
Int Refresh; /* Number of MBs refresh in each frame */
/* Other Parameters */
Bool SceneChange_Det; /* scene change detection */
Bool FineFrameSkip_Enabled; /* src rate resolution frame skipping */
Bool VBR_Enabled; /* VBR rate control */
Bool NoFrameSkip_Enabled; /* do not allow frame skip */
Bool NoPreSkip_Enabled; /* do not allow pre-skip */
Bool H263_Enabled; /* H263 Short Header */
Bool GOV_Enabled; /* GOV Header Enabled */
Bool SequenceStartCode; /* This probably should be removed */
Bool FullSearch_Enabled; /* full-pel exhaustive search motion estimation */
Bool HalfPel_Enabled; /* Turn Halfpel ME on or off */
Bool MV8x8_Enabled; /* Enable 8x8 motion vectors */
Bool RD_opt_Enabled; /* Enable operational R-D optimization */
Int GOB_Header_Interval; /* Enable encoding GOB header in H263_WITH_ERR_RES and SHORT_HEADER_WITH_ERR_RES */
Int SearchRange; /* Search range for 16x16 motion vector */
Int MemoryUsage; /* Amount of memory allocated */
Int GetVolHeader[2]; /* Flag to check if Vol Header has been retrieved */
Int BufferSize[2]; /* Buffer Size for Base and Enhance Layers */
Int ProfileLevel[2]; /* Profile and Level for encoding purposes */
float VBV_delay; /* VBV buffer size in the form of delay */
Int maxFrameSize; /* maximum frame size(bits) for H263/Short header mode, k*16384 */
Int profile_table_index; /* index for profile and level tables given the specified profile and level */
} VideoEncParams;
/* platform dependent functions */
typedef struct tagFuncPtr
{
// Int (*SAD_MB_HalfPel)(UChar *ref,UChar *blk,Int dmin_lx,Int xh,Int yh,void *extra_info);
Int(*SAD_MB_HalfPel[4])(UChar*, UChar*, Int, void *);
Int(*SAD_Blk_HalfPel)(UChar *ref, UChar *blk, Int dmin, Int lx, Int rx, Int xh, Int yh, void *extra_info);
Int(*SAD_Macroblock)(UChar *ref, UChar *blk, Int dmin_lx, void *extra_info);
Int(*SAD_Block)(UChar *ref, UChar *blk, Int dmin, Int lx, void *extra_info);
Int(*SAD_MB_PADDING)(UChar *ref, UChar *blk, Int dmin, Int lx, void *extra_info); /*, 4/21/01 */
void (*ComputeMBSum)(UChar *cur, Int lx, MOT *mot_mb);
void (*ChooseMode)(UChar *Mode, UChar *cur, Int lx, Int min_SAD);
void (*GetHalfPelMBRegion)(UChar *cand, UChar *hmem, Int lx);
void (*blockIdct)(Int *block);
} FuncPtr;
/* 04/09/01, for multipass rate control */
typedef struct tagRDInfo
{
Int QP;
Int actual_bits;
float mad;
float R_D;
} RDInfo;
typedef struct tagMultiPass
{
/* multipass rate control data */
Int target_bits; /* target bits for current frame, = rc->T */
Int actual_bits; /* actual bits for current frame obtained after encoding, = rc->Rc*/
Int QP; /* quantization level for current frame, = rc->Qc*/
Int prev_QP; /* quantization level for previous frame */
Int prev_prev_QP; /* quantization level for previous frame before last*/
float mad; /* mad for current frame, = video->avgMAD*/
Int bitrate; /* bitrate for current frame */
float framerate; /* framerate for current frame*/
Int nRe_Quantized; /* control variable for multipass encoding, */
/* 0 : first pass */
/* 1 : intermediate pass(quantization and VLC loop only) */
/* 2 : final pass(de-quantization, idct, etc) */
/* 3 : macroblock level rate control */
Int encoded_frames; /* counter for all encoded frames */
Int re_encoded_frames; /* counter for all multipass encoded frames*/
Int re_encoded_times; /* counter for all times of multipass frame encoding */
/* Multiple frame prediction*/
RDInfo **pRDSamples; /* pRDSamples[30][32], 30->30fps, 32 -> 5 bit quantizer, 32 candidates*/
Int framePos; /* specific position in previous multiple frames*/
Int frameRange; /* number of overall previous multiple frames */
Int samplesPerFrame[30]; /* number of samples per frame, 30->30fps */
/* Bit allocation for scene change frames and high motion frames */
float sum_mad;
Int counter_BTsrc; /* BT = Bit Transfer, bit transfer from low motion frames or less complicatedly compressed frames */
Int counter_BTdst; /* BT = Bit Transfer, bit transfer to scene change frames or high motion frames or more complicatedly compressed frames */
float sum_QP;
Int diff_counter; /* diff_counter = -diff_counter_BTdst, or diff_counter_BTsrc */
/* For target bitrate or framerate update */
float target_bits_per_frame; /* = C = bitrate/framerate */
float target_bits_per_frame_prev; /* previous C */
float aver_mad; /* so-far average mad could replace sum_mad */
float aver_mad_prev; /* previous average mad */
Int overlapped_win_size; /* transition period of time */
Int encoded_frames_prev; /* previous encoded_frames */
} MultiPass;
/* End */
#ifdef HTFM
typedef struct tagHTFM_Stat
{
Int abs_dif_mad_avg;
UInt countbreak;
Int offsetArray[16];
Int offsetRef[16];
} HTFM_Stat;
#endif
/* Global structure that can be passed around */
typedef struct tagVideoEncData
{
/* VOL Header Initialization */
UChar volInitialize[4]; /* Used to Write VOL Headers */
/* Data For Layers (Scalability) */
Int numberOfLayers; /* Number of Layers */
Vol **vol; /* Data stored for each VOL */
/* Data used for encoding frames */
VideoEncFrameIO *input; /* original input frame */
Vop *currVop; /* Current reconstructed VOP */
Vop *prevBaseVop; /* Previous reference Base Vop */
Vop *nextBaseVop; /* Next reference Base Vop */
Vop *prevEnhanceVop;/* Previous Enhancement Layer Vop */
Vop *forwardRefVop; /* Forward Reference VOP */
Vop *backwardRefVop; /* Backward Reference VOP */
/* scratch memory */
BitstreamEncVideo *bitstream1; /* Used for data partitioning */
BitstreamEncVideo *bitstream2; /* and combined modes as */
BitstreamEncVideo *bitstream3; /* intermediate storages */
UChar *overrunBuffer; /* extra output buffer to prevent current skip due to output buffer overrun*/
Int oBSize; /* size of allocated overrun buffer */
Int dc_scalar_1; /*dc scalar for Y block */
Int dc_scalar_2; /*dc scalar for U, V block*/
/* Annex L Rate Control */
rateControl *rc[4]; /* Pointer to Rate Control structure*/
/* 12/25/00, each R.C. for each layer */
/********* motion compensation related variables ****************/
MOT **mot; /* Motion vectors */
/* where [mbnum][0] = 1MV.
[mbnum][1...4] = 4MVs
[mbnum][5] = backward MV.
[mbnum][6] = delta MV for direct mode.
[mbnum][7] = nothing yet. */
UChar *intraArray; /* Intra Update Array */
float sumMAD; /* SAD/MAD for frame */
/* to speedup the SAD calculation */
void *sad_extra_info;
#ifdef HTFM
Int nrmlz_th[48]; /* Threshold for fast SAD calculation using HTFM */
HTFM_Stat htfm_stat; /* For statistics collection */
#endif
/*Tao 04/09/00 For DCT routine */
UChar currYMB[256]; /* interleaved current macroblock in HTFM order */
MacroBlock *outputMB; /* Output MB to VLC encode */
UChar predictedMB[384]; /* scratch memory for predicted value */
RunLevelBlock RLB[6]; /* Run and Level of coefficients! */
Short dataBlock[128]; /* DCT block data before and after quant/dequant*/
UChar bitmaprow[8]; /* Need to keep it for ACDCPrediction, 8 bytes for alignment, need only 6 */
UChar bitmapcol[6][8];
UInt bitmapzz[6][2]; /* for zigzag bitmap */
Int zeroMV; /* flag for zero MV */
Int usePrevQP; /* flag for intraDCVlcThreshold switch decision */
Int QP_prev; /* use for DQUANT calculation */
Int *acPredFlag; /* */
typeDCStore *predDC; /* The DC coeffs for each MB */
typeDCACStore *predDCAC_row;
typeDCACStore *predDCAC_col;
UChar *sliceNo; /* Slice Number for each MB */
Int header_bits; /* header bits in frame */
HeaderInfoEncVideo headerInfo; /* MB Header information */
UChar zz_direction; /* direction of zigzag scan */
UChar *QPMB; /* Quantizer value for each MB */
/* Miscellaneous data points to be passed */
float FrameRate; /* Src frame Rate */
ULong nextModTime; /* expected next frame time */
UInt prevFrameNum[4]; /* previous frame number starting from modTimeRef */
UInt modTimeRef; /* Reference modTime update every I-Vop*/
UInt refTick[4]; /* second-aligned reference tick */
Int relLayerCodeTime[4];/* Next coding time for each Layer relative to highest layer */
ULong modTime; /* Input frame modTime */
Int currLayer; /* Current frame layer */
Int mbnum; /* Macroblock number */
/* slice coding, state variables */
Vop *tempForwRefVop;
Int tempRefSelCode;
Int end_of_buf; /* end of bitstream buffer flag */
Int slice_coding; /* flag for slice based coding */
Int totalSAD; /* So far total SAD for a frame */
Int numIntra; /* So far number of Intra MB */
Int offset; /* So far MB offset */
Int ind_x, ind_y; /* So far MB coordinate */
Int collect;
Int hp_guess;
/*********************************/
HintTrackInfo hintTrackInfo; /* hintTrackInfo */
/* IntraPeriod, Timestamp, etc. */
float nextEncIVop; /* counter til the next I-Vop */
float numVopsInGOP; /* value at the beginning of nextEncIVop */
/* platform dependent functions */
FuncPtr *functionPointer; /* structure containing platform dependent functions */
/* Application controls */
VideoEncControls *videoEncControls;
VideoEncParams *encParams;
MultiPass *pMP[4]; /* for multipass encoding, 4 represents 4 layer encoding */
} VideoEncData;
/*************************************************************/
/* VLC structures */
/*************************************************************/
typedef struct tagVLCtable
{
unsigned int code; /* right justified */
int len;
} VLCtable, *LPVLCtable;
/*************************************************************/
/* Approx DCT */
/*************************************************************/
typedef struct struct_approxDCT approxDCT;
struct struct_approxDCT
{
Void(*BlockDCT8x8)(Int *, Int *, UChar *, UChar *, Int, Int);
Void(*BlockDCT8x8Intra)(Int *, Int *, UChar *, UChar *, Int, Int);
Void(*BlockDCT8x8wSub)(Int *, Int *, UChar *, UChar *, Int, Int);
};
/*************************************************************/
/* QP structure */
/*************************************************************/
struct QPstruct
{
Int QPx2 ;
Int QP;
Int QPdiv2;
Int QPx2plus;
Int Addition;
};
#endif /* _MP4LIB_INT_H_ */

View File

@ -0,0 +1,885 @@
/* ------------------------------------------------------------------
* Copyright (C) 1998-2009 PacketVideo
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied.
* See the License for the specific language governing permissions
* and limitations under the License.
* -------------------------------------------------------------------
*/
#include "mp4def.h"
#include "mp4lib_int.h"
#include "rate_control.h"
#include "mp4enc_lib.h"
#include "bitstream_io.h"
#include "m4venc_oscl.h"
void targetBitCalculation(void *input);
void calculateQuantizer_Multipass(void *video);
void updateRateControl(rateControl *rc, VideoEncData *video);
void updateRC_PostProc(rateControl *rc, VideoEncData *video);
/***************************************************************************
************** RC APIs to core encoding modules *******************
PV_STATUS RC_Initialize(void *video);
PV_STATUS RC_Cleanup(rateControl *rc[],Int numLayers);
PV_STATUS RC_VopQPSetting(VideoEncData *video,rateControl *rc[]);
PV_STATUS RC_VopUpdateStat(VideoEncData *video,rateControl *rc[]);
PV_STATUS RC_UpdateBuffer(VideoEncData *video, Int currLayer, Int num_skip);
Int RC_GetSkipNextFrame(VideoEncData *video,Int currLayer);
void RC_ResetSkipNextFrame(void *video,Int currLayer);
PV_STATUS RC_UpdateBXRCParams(void *input); Parameters update for target bitrate or framerate change
****************************************************************************/
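/*
 * Illustrative call-order sketch (not part of the original source;
 * inferred from the API listing above and the declarations in
 * mp4enc_lib.h):
 *
 *     RC_Initialize(video);                      // once per sequence
 *     for each input frame {
 *         RC_VopQPSetting(video, video->rc);     // choose the VOP quantizer
 *         ... encode the VOP ...
 *         RC_VopUpdateStat(video, video->rc[n]); // feed back actual bit usage
 *         if (RC_GetSkipNextFrame(video, n) > 0)
 *             ... skip the corresponding source frames ...
 *     }
 *     RC_Cleanup(video->rc, numLayers);          // once per sequence
 */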
/************************************************************************/
/************ API part **************************************************/
/* must be called before each sequence*/
PV_STATUS RC_Initialize(void *input)
{
VideoEncData *video = (VideoEncData *) input;
VideoEncParams *encParams = video->encParams;
rateControl **rc = video->rc;
Int numLayers = encParams->nLayers;
Int *LayerBitRate = encParams->LayerBitRate;
float *LayerFrameRate = encParams->LayerFrameRate;
MultiPass **pMP = video->pMP;
Int n;
for (n = 0; n < numLayers; n++)
{
/* rate control */
rc[n]->fine_frame_skip = encParams->FineFrameSkip_Enabled;
rc[n]->no_frame_skip = encParams->NoFrameSkip_Enabled;
rc[n]->no_pre_skip = encParams->NoPreSkip_Enabled;
rc[n]->skip_next_frame = 0; /* must be initialized */
//rc[n]->TMN_TH = (Int)((float)LayerBitRate[n]/LayerFrameRate[n]);
rc[n]->Bs = video->encParams->BufferSize[n];
rc[n]->TMN_W = 0;
rc[n]->VBV_fullness = (Int)(rc[n]->Bs * 0.5); /* rc[n]->Bs */
rc[n]->encoded_frames = 0;
rc[n]->framerate = LayerFrameRate[n];
if (n == 0)
{
rc[n]->TMN_TH = (Int)((float)LayerBitRate[n] / LayerFrameRate[n]);
rc[n]->bitrate = LayerBitRate[n];
rc[n]->framerate = LayerFrameRate[n];
// For h263 or short header mode, the bit variation is within (-2*Rmax*1001/3000, 2*Rmax*1001/3000)
if (video->encParams->H263_Enabled)
{
rc[n]->max_BitVariance_num = (Int)((rc[n]->Bs - video->encParams->maxFrameSize) / 2 / (rc[n]->bitrate / rc[n]->framerate / 10.0)) - 5;
if (rc[n]->max_BitVariance_num < 0) rc[n]->max_BitVariance_num += 5;
}
else // MPEG-4 normal modes
{
rc[n]->max_BitVariance_num = (Int)((float)(rc[n]->Bs - rc[n]->VBV_fullness) / ((float)LayerBitRate[n] / LayerFrameRate[n] / 10.0)) - 5;
if (rc[n]->max_BitVariance_num < 0) rc[n]->max_BitVariance_num += 5;
}
}
else
{
if (LayerFrameRate[n] - LayerFrameRate[n-1] > 0) /* 7/31/03 */
{
rc[n]->TMN_TH = (Int)((float)(LayerBitRate[n] - LayerBitRate[n-1]) / (LayerFrameRate[n] - LayerFrameRate[n-1]));
rc[n]->max_BitVariance_num = (Int)((float)(rc[n]->Bs - rc[n]->VBV_fullness) * 10 / ((float)rc[n]->TMN_TH)) - 5;
if (rc[n]->max_BitVariance_num < 0) rc[n]->max_BitVariance_num += 5;
}
else /* 7/31/03 */
{
rc[n]->TMN_TH = 1 << 30;
rc[n]->max_BitVariance_num = 0;
}
rc[n]->bitrate = LayerBitRate[n] - LayerBitRate[n-1];
rc[n]->framerate = LayerFrameRate[n] - LayerFrameRate[n-1];
}
// Set the initial buffer fullness
if (1) //!video->encParams->H263_Enabled) { // MPEG-4
{
/* According to the spec, the initial buffer fullness needs to be set to 1/3 */
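/* Worked example (illustrative): with the fullness measured in the signed range [-Bs/2, Bs/2],
   Bs/3 - Bs/2 = -Bs/6 corresponds to Bs/3 in the usual [0, Bs] range, i.e. one third full, and
   counter_BTsrc below is that same Bs/6 gap expressed in units of 10% of the average
   per-frame bit budget (bitrate/framerate/10). */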
rc[n]->VBV_fullness = (Int)(rc[n]->Bs / 3.0 - rc[n]->Bs / 2.0); /* the buffer range is [-Bs/2, Bs/2] */
pMP[n]->counter_BTsrc = (Int)((rc[n]->Bs / 2.0 - rc[n]->Bs / 3.0) / (rc[n]->bitrate / rc[n]->framerate / 10.0));
rc[n]->TMN_W = (Int)(rc[n]->VBV_fullness + pMP[n]->counter_BTsrc * (rc[n]->bitrate / rc[n]->framerate / 10.0));
rc[n]->low_bound = -rc[n]->Bs / 2;
rc[n]-> VBV_fullness_offset = 0;
}
else /* this part doesn't work in some cases, the low_bound is too high, Jan 4,2006 */
{
rc[n]->VBV_fullness = rc[n]->Bs - (Int)(video->encParams->VBV_delay * rc[n]->bitrate);
if (rc[n]->VBV_fullness < 0) rc[n]->VBV_fullness = 0;
//rc[n]->VBV_fullness = (rc[n]->Bs-video->encParams->maxFrameSize)/2 + video->encParams->maxFrameSize;
rc[n]->VBV_fullness -= rc[n]->Bs / 2; /* the buffer range is [-Bs/2, Bs/2] */
rc[n]->low_bound = -rc[n]->Bs / 2 + video->encParams->maxFrameSize; /* too high */
rc[n]->VBV_fullness_offset = video->encParams->maxFrameSize / 2; /* don't understand the meaning of this */
pMP[n]->counter_BTdst = pMP[n]->counter_BTsrc = 0;
}
/* Setting the bitrate and framerate */
pMP[n]->bitrate = rc[n]->bitrate;
pMP[n]->framerate = rc[n]->framerate;
pMP[n]->target_bits_per_frame = pMP[n]->bitrate / pMP[n]->framerate;
}
return PV_SUCCESS;
}
/* ======================================================================== */
/* Function : RC_Cleanup */
/* Date : 12/20/2000 */
/* Purpose : free Rate Control memory */
/* In/out : */
/* Return : */
/* Modified : */
/* ======================================================================== */
PV_STATUS RC_Cleanup(rateControl *rc[], Int numLayers)
{
OSCL_UNUSED_ARG(rc);
OSCL_UNUSED_ARG(numLayers);
return PV_SUCCESS;
}
/* ======================================================================== */
/* Function : RC_VopQPSetting */
/* Date : 4/11/2001 */
/* Purpose : Reset rate control before coding VOP, moved from vop.c */
/* Compute QP for the whole VOP and initialize MB-based RC
reset QPMB[], currVop->quantizer, rc->Ec, video->header_bits */
/* In order to work, RC_VopQPSetting has to do the following:
1. Set video->QPMB of all macroblocks.
2. Set currVop->quantizer
3. Reset video->header_bits to zero.
4. Initialize internal RC parameters for Vop coding */
/* In/out : */
/* Return : PV_STATUS */
/* Modified : */
/* ======================================================================== */
/* To be moved to rate_control.c and separate between BX_RC and ANNEX_L */
PV_STATUS RC_VopQPSetting(VideoEncData *video, rateControl *prc[])
{
Int currLayer = video->currLayer;
Vol *currVol = video->vol[currLayer];
Vop *currVop = video->currVop;
#ifdef TEST_MBBASED_QP
int i;
#endif
rateControl *rc = video->rc[currLayer];
MultiPass *pMP = video->pMP[currLayer];
OSCL_UNUSED_ARG(prc);
if (video->encParams->RC_Type == CONSTANT_Q)
{
M4VENC_MEMSET(video->QPMB, currVop->quantizer, sizeof(UChar)*currVol->nTotalMB);
return PV_SUCCESS;
}
else
{
if (video->rc[currLayer]->encoded_frames == 0) /* rc[currLayer]->totalFrameNumber*/
{
M4VENC_MEMSET(video->QPMB, currVop->quantizer, sizeof(UChar)*currVol->nTotalMB);
video->rc[currLayer]->Qc = video->encParams->InitQuantIvop[currLayer];
}
else
{
calculateQuantizer_Multipass((void*) video);
currVop->quantizer = video->rc[currLayer]->Qc;
#ifdef TEST_MBBASED_QP
i = currVol->nTotalMB; /* testing changing QP at MB level */
while (i)
{
i--;
video->QPMB[i] = (i & 1) ? currVop->quantizer - 1 : currVop->quantizer + 1;
}
#else
M4VENC_MEMSET(video->QPMB, currVop->quantizer, sizeof(UChar)*currVol->nTotalMB);
#endif
}
video->header_bits = 0;
}
/* update pMP->framePos */
if (++pMP->framePos == pMP->frameRange) pMP->framePos = 0;
if (rc->T == 0)
{
pMP->counter_BTdst = (Int)(video->encParams->LayerFrameRate[video->currLayer] * 7.5 + 0.5); /* 0.75s time frame */
pMP->counter_BTdst = PV_MIN(pMP->counter_BTdst, (Int)(rc->max_BitVariance_num / 2 * 0.40)); /* 0.75s time frame may go beyond VBV buffer if we set the buffer size smaller than 0.75s */
pMP->counter_BTdst = PV_MAX(pMP->counter_BTdst, (Int)((rc->Bs / 2 - rc->VBV_fullness) * 0.30 / (rc->TMN_TH / 10.0) + 0.5)); /* At least 30% of VBV buffer size/2 */
pMP->counter_BTdst = PV_MIN(pMP->counter_BTdst, 20); /* Limit the target to be smaller than 3C */
pMP->target_bits = rc->T = rc->TMN_TH = (Int)(rc->TMN_TH * (1.0 + pMP->counter_BTdst * 0.1));
pMP->diff_counter = pMP->counter_BTdst;
}
/* collect the necessary data: target bits, actual bits, mad and QP */
pMP->target_bits = rc->T;
pMP->QP = currVop->quantizer;
pMP->mad = video->sumMAD / (float)currVol->nTotalMB;
if (pMP->mad < MAD_MIN) pMP->mad = MAD_MIN; /* MAD_MIN is defined as 1 in mp4def.h */
pMP->bitrate = rc->bitrate; /* calculated in RCVopQPSetting */
pMP->framerate = rc->framerate;
/* first pass encoding */
pMP->nRe_Quantized = 0;
return PV_SUCCESS;
}
/* ======================================================================== */
/* Function : SaveRDSamples() */
/* Date : 08/29/2001 */
/* History : */
/* Purpose : Save QP, actual_bits, mad and R_D of the current iteration */
/* In/out : */
/* Return : */
/* Modified : */
/* */
/* ======================================================================== */
Void SaveRDSamples(MultiPass *pMP, Int counter_samples)
{
/* for pMP->pRDSamples */
pMP->pRDSamples[pMP->framePos][counter_samples].QP = pMP->QP;
pMP->pRDSamples[pMP->framePos][counter_samples].actual_bits = pMP->actual_bits;
pMP->pRDSamples[pMP->framePos][counter_samples].mad = pMP->mad;
pMP->pRDSamples[pMP->framePos][counter_samples].R_D = (float)(pMP->actual_bits / (pMP->mad + 0.0001));
return ;
}
/* ======================================================================== */
/* Function : RC_VopUpdateStat */
/* Date : 12/20/2000 */
/* Purpose : Update statistics for rate control after encoding each VOP. */
/* No need to change anything in VideoEncData structure. */
/* In/out : */
/* Return : */
/* Modified : */
/* ======================================================================== */
PV_STATUS RC_VopUpdateStat(VideoEncData *video, rateControl *rc)
{
Int currLayer = video->currLayer;
Vol *currVol = video->vol[currLayer];
MultiPass *pMP = video->pMP[currLayer];
Int diff_BTCounter;
switch (video->encParams->RC_Type)
{
case CONSTANT_Q:
break;
case CBR_1:
case CBR_2:
case VBR_1:
case VBR_2:
case CBR_LOWDELAY:
pMP->actual_bits = currVol->stream->byteCount << 3;
SaveRDSamples(pMP, 0);
pMP->encoded_frames++;
/* for pMP->samplesPerFrame */
pMP->samplesPerFrame[pMP->framePos] = 0;
pMP->sum_QP += pMP->QP;
/* update pMP->counter_BTsrc, pMP->counter_BTdst */
/* re-allocate the target bit again and then stop encoding */
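/* Note: diff_BTCounter is this frame's surplus (positive) or deficit (negative), i.e. the
   target bits (TMN_TH - TMN_W) minus the actual bits, expressed in units of 10% of the
   average per-frame budget. A surplus is credited to counter_BTsrc, a deficit to
   counter_BTdst, and TMN_TH is adjusted by the same amount so the bits are paid back later. */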
diff_BTCounter = (Int)((float)(rc->TMN_TH - rc->TMN_W - pMP->actual_bits) /
(pMP->bitrate / (pMP->framerate + 0.0001) + 0.0001) / 0.1);
if (diff_BTCounter >= 0)
pMP->counter_BTsrc += diff_BTCounter; /* pMP->actual_bits is smaller */
else
pMP->counter_BTdst -= diff_BTCounter; /* pMP->actual_bits is bigger */
rc->TMN_TH -= (Int)((float)pMP->bitrate / (pMP->framerate + 0.0001) * (diff_BTCounter * 0.1));
rc->T = pMP->target_bits = rc->TMN_TH - rc->TMN_W;
pMP->diff_counter -= diff_BTCounter;
rc->Rc = currVol->stream->byteCount << 3; /* Total Bits for current frame */
rc->Hc = video->header_bits; /* Total Bits in Header and Motion Vector */
/* BX_RC */
updateRateControl(rc, video);
break;
default: /* for case CBR_1/2, VBR_1/2 */
return PV_FAIL;
}
return PV_SUCCESS;
}
/* ======================================================================== */
/* Function : RC_GetSkipNextFrame, RC_GetRemainingVops */
/* Date : 2/20/2001 */
/* Purpose : To access RC parameters from other parts of the code. */
/* In/out : */
/* Return : */
/* Modified : */
/* ======================================================================== */
Int RC_GetSkipNextFrame(VideoEncData *video, Int currLayer)
{
return video->rc[currLayer]->skip_next_frame;
}
void RC_ResetSkipNextFrame(VideoEncData *video, Int currLayer)
{
video->rc[currLayer]->skip_next_frame = 0;
return ;
}
/* ======================================================================== */
/* Function : RC_UpdateBuffer */
/* Date : 2/20/2001 */
/* Purpose : Update RC in case frames are skipped (camera freeze) */
/* from the application level in addition to what RC requested */
/* In/out : Nr, B, Rr */
/* Return : Void */
/* Modified : */
/* ======================================================================== */
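/* Note: each application-level skipped frame produces no bits while the channel keeps
   draining, so the model removes one average frame's worth of bits (bitrate/framerate)
   from VBV_fullness per skipped frame and credits counter_BTsrc by 10 units (one full
   frame budget in 10% steps); the clamp below guards against buffer underflow. */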
PV_STATUS RC_UpdateBuffer(VideoEncData *video, Int currLayer, Int num_skip)
{
rateControl *rc = video->rc[currLayer];
MultiPass *pMP = video->pMP[currLayer];
if (video == NULL || rc == NULL || pMP == NULL)
return PV_FAIL;
rc->VBV_fullness -= (Int)(rc->bitrate / rc->framerate * num_skip); //rc[currLayer]->Rp;
pMP->counter_BTsrc += 10 * num_skip;
/* Check buffer underflow */
if (rc->VBV_fullness < rc->low_bound)
{
rc->VBV_fullness = rc->low_bound; // -rc->Bs/2;
rc->TMN_W = rc->VBV_fullness - rc->low_bound;
pMP->counter_BTsrc = pMP->counter_BTdst + (Int)((float)(rc->Bs / 2 - rc->low_bound) / 2.0 / (pMP->target_bits_per_frame / 10));
}
return PV_SUCCESS;
}
/* ======================================================================== */
/* Function : RC_UpdateBXRCParams */
/* Date : 4/08/2002 */
/* Purpose : Update RC parameters specifically for target bitrate or */
/* framerate update during an encoding session */
/* In/out : */
/* Return : PV_TRUE if successful, PV_FALSE if failed. */
/* Modified : */
/* ======================================================================== */
PV_STATUS RC_UpdateBXRCParams(void *input)
{
VideoEncData *video = (VideoEncData *) input;
VideoEncParams *encParams = video->encParams;
rateControl **rc = video->rc;
Int numLayers = encParams->nLayers;
Int *LayerBitRate = encParams->LayerBitRate;
float *LayerFrameRate = encParams->LayerFrameRate;
MultiPass **pMP = video->pMP;
Int n, VBV_fullness;
Int diff_counter;
extern Bool SetProfile_BufferSize(VideoEncData *video, float delay, Int bInitialized);
/* Reset video buffer size due to target bitrate change */
SetProfile_BufferSize(video, video->encParams->VBV_delay, 0); /* output: video->encParams->BufferSize[] */
for (n = 0; n < numLayers; n++)
{
/* Remaining stuff about frame dropping and underflow check in update RC */
updateRC_PostProc(rc[n], video);
rc[n]->skip_next_frame = 0; /* must be initialized */
/* New changes: bitrate and framerate, Bs, max_BitVariance_num, TMN_TH(optional), encoded_frames(optional) */
rc[n]->Bs = video->encParams->BufferSize[n];
VBV_fullness = (Int)(rc[n]->Bs * 0.5);
if (n == 0)
{
rc[n]->TMN_TH = (Int)((float)LayerBitRate[n] / LayerFrameRate[n]);
rc[n]->bitrate = pMP[n]->bitrate = LayerBitRate[n];
rc[n]->framerate = pMP[n]->framerate = LayerFrameRate[n];
// For h263 or short header mode, the bit variation is within (-2*Rmax*1001/3000, 2*Rmax*1001/3000)
if (video->encParams->H263_Enabled)
{
rc[n]->max_BitVariance_num = (Int)((rc[n]->Bs - video->encParams->maxFrameSize) / 2 / (rc[n]->bitrate / rc[n]->framerate / 10.0)) - 5;
//rc[n]->max_BitVariance_num = (Int)((float)(rc[n]->Bs - rc[n]->VBV_fullness)/((float)LayerBitRate[n]/LayerFrameRate[n]/10.0))-5;
}
else // MPEG-4 normal modes
{
rc[n]->max_BitVariance_num = (Int)((float)(rc[n]->Bs - VBV_fullness) * 10 / ((float)LayerBitRate[n] / LayerFrameRate[n])) - 5;
}
}
else
{
if (LayerFrameRate[n] - LayerFrameRate[n-1] > 0) /* 7/31/03 */
{
rc[n]->TMN_TH = (Int)((float)(LayerBitRate[n] - LayerBitRate[n-1]) / (LayerFrameRate[n] - LayerFrameRate[n-1]));
rc[n]->max_BitVariance_num = (Int)((float)(rc[n]->Bs - VBV_fullness) * 10 / ((float)rc[n]->TMN_TH)) - 5;
if (rc[n]->max_BitVariance_num < 0) rc[n]->max_BitVariance_num += 5;
}
else /* 7/31/03 */
{
rc[n]->TMN_TH = 1 << 30;
rc[n]->max_BitVariance_num = 0;
}
rc[n]->bitrate = pMP[n]->bitrate = LayerBitRate[n] - LayerBitRate[n-1];
rc[n]->framerate = pMP[n]->framerate = LayerFrameRate[n] - LayerFrameRate[n-1];
}
pMP[n]->target_bits_per_frame_prev = pMP[n]->target_bits_per_frame;
pMP[n]->target_bits_per_frame = pMP[n]->bitrate / (float)(pMP[n]->framerate + 0.0001); /* 7/31/03 */
/* rc[n]->VBV_fullness and rc[n]->TMN_W should be kept same */
/* update pMP[n]->counter_BTdst and pMP[n]->counter_BTsrc */
diff_counter = (Int)((float)(rc[n]->VBV_fullness - rc[n]->TMN_W) /
(pMP[n]->target_bits_per_frame / 10 + 0.0001)); /* 7/31/03 */
pMP[n]->counter_BTdst = pMP[n]->counter_BTsrc = 0;
if (diff_counter > 0)
pMP[n]->counter_BTdst = diff_counter;
else if (diff_counter < 0)
pMP[n]->counter_BTsrc = -diff_counter;
rc[n]->TMN_W = (Int)(rc[n]->VBV_fullness - /* re-calculate rc[n]->TMN_W in order for higher accuracy */
(pMP[n]->target_bits_per_frame / 10) * (pMP[n]->counter_BTdst - pMP[n]->counter_BTsrc));
/* Keep the current average mad */
if (pMP[n]->aver_mad != 0)
{
pMP[n]->aver_mad_prev = pMP[n]->aver_mad;
pMP[n]->encoded_frames_prev = pMP[n]->encoded_frames;
}
pMP[n]->aver_mad = 0;
pMP[n]->overlapped_win_size = 4;
/* Misc */
pMP[n]->sum_mad = pMP[n]->sum_QP = 0;
//pMP[n]->encoded_frames_prev = pMP[n]->encoded_frames;
pMP[n]->encoded_frames = pMP[n]->re_encoded_frames = pMP[n]->re_encoded_times = 0;
} /* end of: for(n=0; n<numLayers; n++) */
return PV_SUCCESS;
}
/* ================================================================================ */
/* Function : targetBitCalculation */
/* Date : 10/01/2001 */
/* Purpose : quadratic bit allocation model: T(n) = C*sqrt(mad(n)/aver_mad(n-1)) */
/* */
/* In/out : rc->T */
/* Return : Void */
/* Modified : */
/* ================================================================================ */
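/* Illustrative example: when overlapped_win_size == 0 and curr_mad = 1.5 * aver_mad
   (ratio <= 2), diff_counter_BTdst = (Int)(1.5*10 + 0.4) - 10 = 5, so before the clipping
   below the frame target TMN_TH is raised by 5 * 10% = 50% of the average per-frame budget
   and counter_BTdst grows by 5, to be repaid by later easier frames. */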
void targetBitCalculation(void *input)
{
VideoEncData *video = (VideoEncData *) input;
MultiPass *pMP = video->pMP[video->currLayer];
Vol *currVol = video->vol[video->currLayer];
rateControl *rc = video->rc[video->currLayer];
float curr_mad;//, average_mad;
Int diff_counter_BTsrc, diff_counter_BTdst, prev_counter_diff, curr_counter_diff, bound;
/* BT = Bit Transfer, for pMP->counter_BTsrc, pMP->counter_BTdst */
if (video == NULL || currVol == NULL || pMP == NULL || rc == NULL)
return;
/* some stuff about frame dropping remained here to be done because pMP cannot be inserted into updateRateControl()*/
updateRC_PostProc(rc, video);
/* update pMP->counter_BTsrc and pMP->counter_BTdst to avoid integer overflow */
if (pMP->counter_BTsrc > 1000 && pMP->counter_BTdst > 1000)
{
pMP->counter_BTsrc -= 1000;
pMP->counter_BTdst -= 1000;
}
/* ---------------------------------------------------------------------------------------------------*/
/* target calculation */
curr_mad = video->sumMAD / (float)currVol->nTotalMB;
if (curr_mad < MAD_MIN) curr_mad = MAD_MIN; /* MAD_MIN is defined as 1 in mp4def.h */
diff_counter_BTsrc = diff_counter_BTdst = 0;
pMP->diff_counter = 0;
/*1.calculate average mad */
pMP->sum_mad += curr_mad;
//average_mad = (pMP->encoded_frames < 1 ? curr_mad : pMP->sum_mad/(float)(pMP->encoded_frames+1)); /* this function is called from the second encoded frame*/
//pMP->aver_mad = average_mad;
if (pMP->encoded_frames >= 0) /* pMP->encoded_frames is set to -1 initially, so forget about the very first I frame */
pMP->aver_mad = (pMP->aver_mad * pMP->encoded_frames + curr_mad) / (pMP->encoded_frames + 1);
if (pMP->overlapped_win_size > 0 && pMP->encoded_frames_prev >= 0) /* 7/31/03 */
pMP->aver_mad_prev = (pMP->aver_mad_prev * pMP->encoded_frames_prev + curr_mad) / (pMP->encoded_frames_prev + 1);
/*2.average_mad, mad ==> diff_counter_BTsrc, diff_counter_BTdst */
if (pMP->overlapped_win_size == 0)
{
/* original version */
if (curr_mad > pMP->aver_mad*1.1)
{
if (curr_mad / (pMP->aver_mad + 0.0001) > 2)
diff_counter_BTdst = (Int)(M4VENC_SQRT(curr_mad / (pMP->aver_mad + 0.0001)) * 10 + 0.4) - 10;
//diff_counter_BTdst = (Int)((sqrt(curr_mad/pMP->aver_mad)*2+curr_mad/pMP->aver_mad)/(3*0.1) + 0.4) - 10;
else
diff_counter_BTdst = (Int)(curr_mad / (pMP->aver_mad + 0.0001) * 10 + 0.4) - 10;
}
else /* curr_mad <= average_mad*1.1 */
//diff_counter_BTsrc = 10 - (Int)((sqrt(curr_mad/pMP->aver_mad) + pow(curr_mad/pMP->aver_mad, 1.0/3.0))/(2.0*0.1) + 0.4);
diff_counter_BTsrc = 10 - (Int)(M4VENC_SQRT(curr_mad / (pMP->aver_mad + 0.0001)) * 10 + 0.5);
//diff_counter_BTsrc = 10 - (Int)(curr_mad/pMP->aver_mad/0.1 + 0.5)
/* actively fill in the possible gap */
if (diff_counter_BTsrc == 0 && diff_counter_BTdst == 0 &&
curr_mad <= pMP->aver_mad*1.1 && pMP->counter_BTsrc < pMP->counter_BTdst)
diff_counter_BTsrc = 1;
}
else if (pMP->overlapped_win_size > 0)
{
/* transition time: use previous average mad "pMP->aver_mad_prev" instead of the current average mad "pMP->aver_mad" */
if (curr_mad > pMP->aver_mad_prev*1.1)
{
if (curr_mad / pMP->aver_mad_prev > 2)
diff_counter_BTdst = (Int)(M4VENC_SQRT(curr_mad / (pMP->aver_mad_prev + 0.0001)) * 10 + 0.4) - 10;
//diff_counter_BTdst = (Int)((M4VENC_SQRT(curr_mad/pMP->aver_mad_prev)*2+curr_mad/pMP->aver_mad_prev)/(3*0.1) + 0.4) - 10;
else
diff_counter_BTdst = (Int)(curr_mad / (pMP->aver_mad_prev + 0.0001) * 10 + 0.4) - 10;
}
else /* curr_mad <= average_mad*1.1 */
//diff_counter_BTsrc = 10 - (Int)((sqrt(curr_mad/pMP->aver_mad_prev) + pow(curr_mad/pMP->aver_mad_prev, 1.0/3.0))/(2.0*0.1) + 0.4);
diff_counter_BTsrc = 10 - (Int)(M4VENC_SQRT(curr_mad / (pMP->aver_mad_prev + 0.0001)) * 10 + 0.5);
//diff_counter_BTsrc = 10 - (Int)(curr_mad/pMP->aver_mad_prev/0.1 + 0.5)
/* actively fill in the possible gap */
if (diff_counter_BTsrc == 0 && diff_counter_BTdst == 0 &&
curr_mad <= pMP->aver_mad_prev*1.1 && pMP->counter_BTsrc < pMP->counter_BTdst)
diff_counter_BTsrc = 1;
if (--pMP->overlapped_win_size <= 0) pMP->overlapped_win_size = 0;
}
/* if difference is too much, do clipping */
/* First, set the upper bound for current bit allocation variance: 80% of available buffer */
bound = (Int)((rc->Bs / 2 - rc->VBV_fullness) * 0.6 / (pMP->target_bits_per_frame / 10)); /* rc->Bs */
diff_counter_BTsrc = PV_MIN(diff_counter_BTsrc, bound);
diff_counter_BTdst = PV_MIN(diff_counter_BTdst, bound);
/* Second, set another upper bound for current bit allocation: 4-5*bitrate/framerate */
bound = 50;
// if(video->encParams->RC_Type == CBR_LOWDELAY)
// not necessary bound = 10; /* 1/17/02 -- For Low delay */
diff_counter_BTsrc = PV_MIN(diff_counter_BTsrc, bound);
diff_counter_BTdst = PV_MIN(diff_counter_BTdst, bound);
/* Third, check the buffer */
prev_counter_diff = pMP->counter_BTdst - pMP->counter_BTsrc;
curr_counter_diff = prev_counter_diff + (diff_counter_BTdst - diff_counter_BTsrc);
if (PV_ABS(prev_counter_diff) >= rc->max_BitVariance_num || PV_ABS(curr_counter_diff) >= rc->max_BitVariance_num) // PV_ABS(curr_counter_diff) >= PV_ABS(prev_counter_diff) )
{ //diff_counter_BTsrc = diff_counter_BTdst = 0;
if (curr_counter_diff > rc->max_BitVariance_num && diff_counter_BTdst)
{
diff_counter_BTdst = (rc->max_BitVariance_num - prev_counter_diff) + diff_counter_BTsrc;
if (diff_counter_BTdst < 0) diff_counter_BTdst = 0;
}
else if (curr_counter_diff < -rc->max_BitVariance_num && diff_counter_BTsrc)
{
diff_counter_BTsrc = diff_counter_BTdst - (-rc->max_BitVariance_num - prev_counter_diff);
if (diff_counter_BTsrc < 0) diff_counter_BTsrc = 0;
}
}
/*3.diff_counter_BTsrc, diff_counter_BTdst ==> TMN_TH */
//rc->TMN_TH = (Int)((float)pMP->bitrate/pMP->framerate);
rc->TMN_TH = (Int)(pMP->target_bits_per_frame);
pMP->diff_counter = 0;
if (diff_counter_BTsrc)
{
rc->TMN_TH -= (Int)(pMP->target_bits_per_frame * diff_counter_BTsrc * 0.1);
pMP->diff_counter = -diff_counter_BTsrc;
}
else if (diff_counter_BTdst)
{
rc->TMN_TH += (Int)(pMP->target_bits_per_frame * diff_counter_BTdst * 0.1);
pMP->diff_counter = diff_counter_BTdst;
}
/*4.update pMP->counter_BTsrc, pMP->counter_BTdst */
pMP->counter_BTsrc += diff_counter_BTsrc;
pMP->counter_BTdst += diff_counter_BTdst;
/*5.target bit calculation */
rc->T = rc->TMN_TH - rc->TMN_W;
//rc->T = rc->TMN_TH - (Int)((float)rc->TMN_W/rc->frameRate);
if (video->encParams->H263_Enabled && rc->T > video->encParams->maxFrameSize)
{
rc->T = video->encParams->maxFrameSize; // added this 11/07/05
}
}
/* ================================================================================ */
/* Function : calculateQuantizer_Multipass */
/* Date : 10/01/2001 */
/* Purpose : variable rate bit allocation + new QP determination scheme */
/* */
/* In/out : rc->T and rc->Qc */
/* Return : Void */
/* Modified : */
/* ================================================================================ */
/* Mad based variable bit allocation + QP calculation with a new quadratic method */
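/* Illustrative example: with prev_QP = 10 and prev_RD / curr_RD = 1.5 (within (0.5, 2)),
   Qc = (Int)(10 * (sqrt(1.5) + 1.5) / 2 + 0.9) = (Int)(14.52) = 14, i.e. the quantizer is
   raised because the new target allows fewer bits per unit of MAD; Qc is then clamped to
   the [1, 31] range. */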
void calculateQuantizer_Multipass(void *input)
{
VideoEncData *video = (VideoEncData *) input;
MultiPass *pMP = video->pMP[video->currLayer];
Vol *currVol = video->vol[video->currLayer];
rateControl *rc = video->rc[video->currLayer];
Int prev_QP, prev_actual_bits, curr_target, i, j;
float curr_mad, prev_mad, curr_RD, prev_RD, average_mad, aver_QP;
if (video == NULL || currVol == NULL || pMP == NULL || rc == NULL)
return;
/* Mad based variable bit allocation */
targetBitCalculation((void*) video);
if (rc->T <= 0 || video->sumMAD == 0)
{
if (rc->T < 0) rc->Qc = 31;
return;
}
/* ---------------------------------------------------------------------------------------------------*/
/* current frame QP estimation */
curr_target = rc->T;
curr_mad = video->sumMAD / (float)currVol->nTotalMB;
if (curr_mad < MAD_MIN) curr_mad = MAD_MIN; /* MAD_MIN is defined as 1 in mp4def.h */
curr_RD = (float)curr_target / curr_mad;
/* Another version of search the optimal point */
prev_actual_bits = pMP->pRDSamples[0][0].actual_bits;
prev_mad = pMP->pRDSamples[0][0].mad;
for (i = 0, j = 0; i < pMP->frameRange; i++)
{
if (pMP->pRDSamples[i][0].mad != 0 && prev_mad != 0 &&
PV_ABS(prev_mad - curr_mad) > PV_ABS(pMP->pRDSamples[i][0].mad - curr_mad))
{
prev_mad = pMP->pRDSamples[i][0].mad;
prev_actual_bits = pMP->pRDSamples[i][0].actual_bits;
j = i;
}
}
prev_QP = pMP->pRDSamples[j][0].QP;
for (i = 1; i < pMP->samplesPerFrame[j]; i++)
{
if (PV_ABS(prev_actual_bits - curr_target) > PV_ABS(pMP->pRDSamples[j][i].actual_bits - curr_target))
{
prev_actual_bits = pMP->pRDSamples[j][i].actual_bits;
prev_QP = pMP->pRDSamples[j][i].QP;
}
}
// quadratic approximation
prev_RD = (float)prev_actual_bits / prev_mad;
//rc->Qc = (Int)(prev_QP * sqrt(prev_actual_bits/curr_target) + 0.4);
if (prev_QP == 1) // 11/14/05, added this to allow getting out of QP = 1 easily
{
rc->Qc = (Int)(prev_RD / curr_RD + 0.5);
}
else
{
rc->Qc = (Int)(prev_QP * M4VENC_SQRT(prev_RD / curr_RD) + 0.9);
if (prev_RD / curr_RD > 0.5 && prev_RD / curr_RD < 2.0)
rc->Qc = (Int)(prev_QP * (M4VENC_SQRT(prev_RD / curr_RD) + prev_RD / curr_RD) / 2.0 + 0.9); /* Quadratic and linear approximation */
else
rc->Qc = (Int)(prev_QP * (M4VENC_SQRT(prev_RD / curr_RD) + M4VENC_POW(prev_RD / curr_RD, 1.0 / 3.0)) / 2.0 + 0.9);
}
//rc->Qc =(Int)(prev_QP * sqrt(prev_RD/curr_RD) + 0.4);
// 11/08/05
// lower bound on Qc should be a function of curr_mad
// When mad is already low, lower bound on Qc doesn't have to be small.
// Note, this doesn't work well for low complexity clip encoded at high bit rate
// it doesn't hit the target bit rate due to this QP lower bound.
/// if((curr_mad < 8) && (rc->Qc < 12)) rc->Qc = 12;
// else if((curr_mad < 128) && (rc->Qc < 3)) rc->Qc = 3;
if (rc->Qc < 1) rc->Qc = 1;
if (rc->Qc > 31) rc->Qc = 31;
/* active bit resource protection */
aver_QP = (pMP->encoded_frames == 0 ? 0 : pMP->sum_QP / (float)pMP->encoded_frames);
average_mad = (pMP->encoded_frames == 0 ? 0 : pMP->sum_mad / (float)pMP->encoded_frames); /* this function is called from the second encoded frame*/
if (pMP->diff_counter == 0 &&
((float)rc->Qc <= aver_QP*1.1 || curr_mad <= average_mad*1.1) &&
pMP->counter_BTsrc <= (pMP->counter_BTdst + (Int)(pMP->framerate*1.0 + 0.5)))
{
rc->TMN_TH -= (Int)(pMP->target_bits_per_frame / 10.0);
rc->T = rc->TMN_TH - rc->TMN_W;
pMP->counter_BTsrc++;
pMP->diff_counter--;
}
}
/* ======================================================================== */
/* Function : updateRateControl */
/* Date : 11/17/2000 */
/* Purpose :Update the RD Modal (After Encoding the Current Frame) */
/* In/out : */
/* Return : */
/* Modified : */
/* ======================================================================== */
void updateRateControl(rateControl *rc, VideoEncData *video)
{
Int frame_bits;
/* rate control */
frame_bits = (Int)(rc->bitrate / rc->framerate);
rc->TMN_W += (rc->Rc - rc->TMN_TH);
rc->VBV_fullness += (rc->Rc - frame_bits); //rc->Rp);
//if(rc->VBV_fullness < 0) rc->VBV_fullness = -1;
rc->encoded_frames++;
/* frame dropping */
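/* Note on skip_next_frame semantics (as read back via RC_GetSkipNextFrame and handled in
   updateRC_PostProc): -1 means the frame just encoded should be dropped and its bits backed
   out of the model; a positive value N means the next N source frames should be skipped;
   0 means no action. */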
rc->skip_next_frame = 0;
if ((video->encParams->H263_Enabled && rc->Rc > video->encParams->maxFrameSize) || /* For H263/short header mode, drop the frame if the actual frame size exceeds the bound */
(rc->VBV_fullness > rc->Bs / 2 && !rc->no_pre_skip)) /* skip the current frame */ /* rc->Bs */
{
rc->TMN_W -= (rc->Rc - rc->TMN_TH);
rc->VBV_fullness -= rc->Rc;
rc->skip_next_frame = -1;
}
else if ((float)(rc->VBV_fullness - rc->VBV_fullness_offset) > (rc->Bs / 2 - rc->VBV_fullness_offset)*0.95 &&
!rc->no_frame_skip) /* skip next frame */
{
rc->VBV_fullness -= frame_bits; //rc->Rp;
rc->skip_next_frame = 1;
/* skip more than 1 frames */
//while(rc->VBV_fullness > rc->Bs*0.475)
while ((rc->VBV_fullness - rc->VBV_fullness_offset) > (rc->Bs / 2 - rc->VBV_fullness_offset)*0.95)
{
rc->VBV_fullness -= frame_bits; //rc->Rp;
rc->skip_next_frame++;
}
/* END */
}
}
/* ======================================================================== */
/* Function : updateRC_PostProc */
/* Date : 04/08/2002 */
/* Purpose : Remaining RC update stuff for frame skip and buffer underflow */
/* check */
/* In/out : */
/* Return : */
/* Modified : */
/* ======================================================================== */
void updateRC_PostProc(rateControl *rc, VideoEncData *video)
{
MultiPass *pMP = video->pMP[video->currLayer];
if (rc->skip_next_frame == 1 && !rc->no_frame_skip) /* skip next frame */
{
pMP->counter_BTsrc += 10 * rc->skip_next_frame;
}
else if (rc->skip_next_frame == -1 && !rc->no_pre_skip) /* skip current frame */
{
pMP->counter_BTdst -= pMP->diff_counter;
pMP->counter_BTsrc += 10;
pMP->sum_mad -= pMP->mad;
pMP->aver_mad = (pMP->aver_mad * pMP->encoded_frames - pMP->mad) / (float)(pMP->encoded_frames - 1 + 0.0001);
pMP->sum_QP -= pMP->QP;
pMP->encoded_frames --;
}
/* some stuff in update VBV_fullness remains here */
//if(rc->VBV_fullness < -rc->Bs/2) /* rc->Bs */
if (rc->VBV_fullness < rc->low_bound)
{
rc->VBV_fullness = rc->low_bound; // -rc->Bs/2;
rc->TMN_W = rc->VBV_fullness - rc->low_bound;
pMP->counter_BTsrc = pMP->counter_BTdst + (Int)((float)(rc->Bs / 2 - rc->low_bound) / 2.0 / (pMP->target_bits_per_frame / 10));
}
}

View File

@ -0,0 +1,96 @@
/* ------------------------------------------------------------------
* Copyright (C) 1998-2009 PacketVideo
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied.
* See the License for the specific language governing permissions
* and limitations under the License.
* -------------------------------------------------------------------
*/
#ifndef _RATE_CONTROL_H_
#define _RATE_CONTROL_H_
#include "mp4def.h"
typedef struct tagdataPointArray
{
Int Qp;
Int Rp;
float Mp; /* for MB-based RC, 3/14/01 */
struct tagdataPointArray *next;
struct tagdataPointArray *prev;
} dataPointArray;
typedef struct
{
Int alpha; /* weight for I frame */
Int Rs; /*bit rate for the sequence (or segment) e.g., 24000 bits/sec */
Int Rc; /*bits used for the current frame. It is the bit count obtained after encoding. */
Int Rp; /*bits to be removed from the buffer per picture. */
/*? is this the average one, or just the bits coded for the previous frame */
Int Rps; /*bit to be removed from buffer per src frame */
float Ts; /*number of seconds for the sequence (or segment). e.g., 10 sec */
float Ep;
float Ec; /*mean absolute difference for the current frame after motion compensation.*/
/*If the macroblock is intra coded, the original spatial pixel values are summed.*/
Int Qc; /*quantization level used for the current frame. */
Int Nr; /*number of P frames remaining for encoding.*/
Int Rr; /*number of bits remaining for encoding this sequence (or segment).*/
Int Rr_Old;/* 12/24/00 */
Int T; /*target bit to be used for the current frame.*/
Int S; /*number of bits used for encoding the previous frame.*/
Int Hc; /*header and motion vector bits used in the current frame. It includes all the information except to the residual information.*/
Int Hp; /*header and motion vector bits used in the previous frame. It includes all the information except to the residual information.*/
Int Ql; /*quantization level used in the previous frame */
Int Bs; /*buffer size e.g., R/2 */
Int B; /*current buffer level e.g., R/4 - start from the middle of the buffer */
float X1;
float X2;
float X11;
float M; /*safe margin for the buffer */
float smTick; /*ratio of src versus enc frame rate */
double remnant; /*remainder frame of src/enc frame for fine frame skipping */
Int timeIncRes; /* vol->timeIncrementResolution */
dataPointArray *end; /*quantization levels for the past (20) frames */
Int frameNumber; /* ranging from 0 to 20 nodes*/
Int w;
Int Nr_Original;
Int Nr_Old, Nr_Old2;
Int skip_next_frame;
Int Qdep; /* smooth Q adjustment */
Int fine_frame_skip;
Int VBR_Enabled;
Int no_frame_skip;
Int no_pre_skip;
Int totalFrameNumber; /* total coded frames, for debugging!!*/
char oFirstTime;
/* BX rate control */
Int TMN_W;
Int TMN_TH;
Int VBV_fullness;
Int max_BitVariance_num; /* the number of the maximum bit variance within the given buffer with the unit of 10% of bitrate/framerate*/
Int encoded_frames; /* counter for all encoded frames */
float framerate;
Int bitrate;
Int low_bound; /* bound for underflow detection, usually low_bound=-Bs/2, but could be changed in H.263 mode */
Int VBV_fullness_offset; /* offset of VBV_fullness, usually is zero, but can be changed in H.263 mode*/
/* End BX */
} rateControl;
#endif /* _RATE_CONTROL_H_ */

View File

@ -0,0 +1,375 @@
/* ------------------------------------------------------------------
* Copyright (C) 1998-2009 PacketVideo
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied.
* See the License for the specific language governing permissions
* and limitations under the License.
* -------------------------------------------------------------------
*/
#include "mp4def.h"
#include "mp4lib_int.h"
#include "sad_inline.h"
#define Cached_lx 176
#ifdef _SAD_STAT
ULong num_sad_MB = 0;
ULong num_sad_Blk = 0;
ULong num_sad_MB_call = 0;
ULong num_sad_Blk_call = 0;
#define NUM_SAD_MB_CALL() num_sad_MB_call++
#define NUM_SAD_MB() num_sad_MB++
#define NUM_SAD_BLK_CALL() num_sad_Blk_call++
#define NUM_SAD_BLK() num_sad_Blk++
#else
#define NUM_SAD_MB_CALL()
#define NUM_SAD_MB()
#define NUM_SAD_BLK_CALL()
#define NUM_SAD_BLK()
#endif
/* consist of
Int SAD_Macroblock_C(UChar *ref,UChar *blk,Int dmin,Int lx,void *extra_info)
Int SAD_MB_HTFM_Collect(UChar *ref,UChar *blk,Int dmin,Int lx,void *extra_info)
Int SAD_MB_HTFM(UChar *ref,UChar *blk,Int dmin,Int lx,void *extra_info)
Int SAD_Block_C(UChar *ref,UChar *blk,Int dmin,Int lx,void *extra_info)
Int SAD_Blk_PADDING(UChar *ref,UChar *cur,Int dmin,Int lx,void *extra_info)
Int SAD_MB_PADDING(UChar *ref,UChar *cur,Int dmin,Int lx,void *extra_info)
Int SAD_MB_PAD1(UChar *ref,UChar *cur,Int dmin,Int lx,Int *rep);
Int SAD_MB_PADDING_HTFM_Collect(UChar *ref,UChar *cur,Int dmin,Int lx,void *extra_info)
Int SAD_MB_PADDING_HTFM(UChar *ref,UChar *cur,Int dmin,Int lx,void *vptr)
*/
#ifdef __cplusplus
extern "C"
{
#endif
Int SAD_MB_PAD1(UChar *ref, UChar *cur, Int dmin, Int lx, Int *rep);
/*==================================================================
Function: SAD_Macroblock
Date: 09/07/2000
Purpose: Compute SAD 16x16 between blk and ref.
To do: Uniform subsampling will be inserted later!
Hypothesis Testing Fast Matching to be used later!
Changes:
11/7/00: implemented MMX
1/24/01: implemented SSE
==================================================================*/
/********** C ************/
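/* Note: dmin_lx packs two arguments in one register, as the unpacking below shows:
   the current best SAD (dmin) in the upper 16 bits and the reference line width (lx)
   in the lower 16 bits, i.e. dmin_lx = (dmin << 16) | lx. */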
Int SAD_Macroblock_C(UChar *ref, UChar *blk, Int dmin_lx, void *extra_info)
{
int32 x10;
Int dmin = (ULong)dmin_lx >> 16;
Int lx = dmin_lx & 0xFFFF;
OSCL_UNUSED_ARG(extra_info);
NUM_SAD_MB_CALL();
x10 = simd_sad_mb(ref, blk, dmin, lx);
return x10;
}
#ifdef HTFM /* HTFM with uniform subsampling implementation, 2/28/01 */
/*===============================================================
Function: SAD_MB_HTFM_Collect and SAD_MB_HTFM
Date: 3/2/1
Purpose: Compute the SAD on a 16x16 block using
uniform subsampling and hypothesis testing fast matching
for early dropout. SAD_MB_HTFM_Collect collects
the statistics to compute the thresholds to be used in
SAD_MB_HTFM.
Input/Output:
Changes:
===============================================================*/
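/* Note: both routines visit the 256 pixels of the macroblock in 16 interleaved stages of
   16 pixels each (row offsets taken from offsetRef[]). The Collect variant checks the
   partial SAD against dmin after each stage beyond the first and also gathers |MAD|
   statistics used to train the per-stage thresholds; SAD_MB_HTFM then rejects a candidate
   early (returning 65536) when the partial SAD exceeds dmin or sadstar - nrmlz_th[i],
   where sadstar grows by madstar = dmin/16 per stage. */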
Int SAD_MB_HTFM_Collect(UChar *ref, UChar *blk, Int dmin_lx, void *extra_info)
{
Int i;
Int sad = 0;
UChar *p1;
Int lx4 = (dmin_lx << 2) & 0x3FFFC;
ULong cur_word;
Int saddata[16], tmp, tmp2; /* used when collecting flag (global) is on */
Int difmad;
HTFM_Stat *htfm_stat = (HTFM_Stat*) extra_info;
Int *abs_dif_mad_avg = &(htfm_stat->abs_dif_mad_avg);
UInt *countbreak = &(htfm_stat->countbreak);
Int *offsetRef = htfm_stat->offsetRef;
NUM_SAD_MB_CALL();
blk -= 4;
for (i = 0; i < 16; i++)
{
p1 = ref + offsetRef[i];
cur_word = *((ULong*)(blk += 4));
tmp = p1[12];
tmp2 = (cur_word >> 24) & 0xFF;
sad = SUB_SAD(sad, tmp, tmp2);
tmp = p1[8];
tmp2 = (cur_word >> 16) & 0xFF;
sad = SUB_SAD(sad, tmp, tmp2);
tmp = p1[4];
tmp2 = (cur_word >> 8) & 0xFF;
sad = SUB_SAD(sad, tmp, tmp2);
tmp = p1[0];
p1 += lx4;
tmp2 = (cur_word & 0xFF);
sad = SUB_SAD(sad, tmp, tmp2);
cur_word = *((ULong*)(blk += 4));
tmp = p1[12];
tmp2 = (cur_word >> 24) & 0xFF;
sad = SUB_SAD(sad, tmp, tmp2);
tmp = p1[8];
tmp2 = (cur_word >> 16) & 0xFF;
sad = SUB_SAD(sad, tmp, tmp2);
tmp = p1[4];
tmp2 = (cur_word >> 8) & 0xFF;
sad = SUB_SAD(sad, tmp, tmp2);
tmp = p1[0];
p1 += lx4;
tmp2 = (cur_word & 0xFF);
sad = SUB_SAD(sad, tmp, tmp2);
cur_word = *((ULong*)(blk += 4));
tmp = p1[12];
tmp2 = (cur_word >> 24) & 0xFF;
sad = SUB_SAD(sad, tmp, tmp2);
tmp = p1[8];
tmp2 = (cur_word >> 16) & 0xFF;
sad = SUB_SAD(sad, tmp, tmp2);
tmp = p1[4];
tmp2 = (cur_word >> 8) & 0xFF;
sad = SUB_SAD(sad, tmp, tmp2);
tmp = p1[0];
p1 += lx4;
tmp2 = (cur_word & 0xFF);
sad = SUB_SAD(sad, tmp, tmp2);
cur_word = *((ULong*)(blk += 4));
tmp = p1[12];
tmp2 = (cur_word >> 24) & 0xFF;
sad = SUB_SAD(sad, tmp, tmp2);
tmp = p1[8];
tmp2 = (cur_word >> 16) & 0xFF;
sad = SUB_SAD(sad, tmp, tmp2);
tmp = p1[4];
tmp2 = (cur_word >> 8) & 0xFF;
sad = SUB_SAD(sad, tmp, tmp2);
tmp = p1[0];
p1 += lx4;
tmp2 = (cur_word & 0xFF);
sad = SUB_SAD(sad, tmp, tmp2);
NUM_SAD_MB();
saddata[i] = sad;
if (i > 0)
{
if ((ULong)sad > ((ULong)dmin_lx >> 16))
{
difmad = saddata[0] - ((saddata[1] + 1) >> 1);
(*abs_dif_mad_avg) += ((difmad > 0) ? difmad : -difmad);
(*countbreak)++;
return sad;
}
}
}
difmad = saddata[0] - ((saddata[1] + 1) >> 1);
(*abs_dif_mad_avg) += ((difmad > 0) ? difmad : -difmad);
(*countbreak)++;
return sad;
}
Int SAD_MB_HTFM(UChar *ref, UChar *blk, Int dmin_lx, void *extra_info)
{
Int sad = 0;
UChar *p1;
Int i;
Int tmp, tmp2;
Int lx4 = (dmin_lx << 2) & 0x3FFFC;
Int sadstar = 0, madstar;
Int *nrmlz_th = (Int*) extra_info;
Int *offsetRef = (Int*) extra_info + 32;
ULong cur_word;
madstar = (ULong)dmin_lx >> 20;
NUM_SAD_MB_CALL();
blk -= 4;
for (i = 0; i < 16; i++)
{
p1 = ref + offsetRef[i];
cur_word = *((ULong*)(blk += 4));
tmp = p1[12];
tmp2 = (cur_word >> 24) & 0xFF;
sad = SUB_SAD(sad, tmp, tmp2);
tmp = p1[8];
tmp2 = (cur_word >> 16) & 0xFF;
sad = SUB_SAD(sad, tmp, tmp2);
tmp = p1[4];
tmp2 = (cur_word >> 8) & 0xFF;
sad = SUB_SAD(sad, tmp, tmp2);
tmp = p1[0];
p1 += lx4;
tmp2 = (cur_word & 0xFF);
sad = SUB_SAD(sad, tmp, tmp2);
cur_word = *((ULong*)(blk += 4));
tmp = p1[12];
tmp2 = (cur_word >> 24) & 0xFF;
sad = SUB_SAD(sad, tmp, tmp2);
tmp = p1[8];
tmp2 = (cur_word >> 16) & 0xFF;
sad = SUB_SAD(sad, tmp, tmp2);
tmp = p1[4];
tmp2 = (cur_word >> 8) & 0xFF;
sad = SUB_SAD(sad, tmp, tmp2);
tmp = p1[0];
p1 += lx4;
tmp2 = (cur_word & 0xFF);
sad = SUB_SAD(sad, tmp, tmp2);
cur_word = *((ULong*)(blk += 4));
tmp = p1[12];
tmp2 = (cur_word >> 24) & 0xFF;
sad = SUB_SAD(sad, tmp, tmp2);
tmp = p1[8];
tmp2 = (cur_word >> 16) & 0xFF;
sad = SUB_SAD(sad, tmp, tmp2);
tmp = p1[4];
tmp2 = (cur_word >> 8) & 0xFF;
sad = SUB_SAD(sad, tmp, tmp2);
tmp = p1[0];
p1 += lx4;
tmp2 = (cur_word & 0xFF);
sad = SUB_SAD(sad, tmp, tmp2);
cur_word = *((ULong*)(blk += 4));
tmp = p1[12];
tmp2 = (cur_word >> 24) & 0xFF;
sad = SUB_SAD(sad, tmp, tmp2);
tmp = p1[8];
tmp2 = (cur_word >> 16) & 0xFF;
sad = SUB_SAD(sad, tmp, tmp2);
tmp = p1[4];
tmp2 = (cur_word >> 8) & 0xFF;
sad = SUB_SAD(sad, tmp, tmp2);
tmp = p1[0];
p1 += lx4;
tmp2 = (cur_word & 0xFF);
sad = SUB_SAD(sad, tmp, tmp2);
NUM_SAD_MB();
sadstar += madstar;
if (((ULong)sad <= ((ULong)dmin_lx >> 16)) && (sad <= (sadstar - *nrmlz_th++)))
;
else
return 65536;
}
return sad;
}
#endif /* HTFM */
#ifndef NO_INTER4V
/*==================================================================
Function: SAD_Block
Date: 09/07/2000
Purpose: Compute SAD 8x8 between blk and ref.
To do: Uniform subsampling will be inserted later!
Hypothesis Testing Fast Matching to be used later!
Changes:
11/7/00: implemented MMX
1/24/01: implemented SSE
==================================================================*/
/********** C ************/
Int SAD_Block_C(UChar *ref, UChar *blk, Int dmin, Int lx, void *)
{
Int sad = 0;
Int i;
UChar *ii;
Int *kk;
Int tmp, tmp2, tmp3, mask = 0xFF;
Int width = (lx - 32);
NUM_SAD_BLK_CALL();
ii = ref;
kk = (Int*)blk; /* assuming word-align for blk */
for (i = 0; i < 8; i++)
{
tmp3 = kk[1];
tmp = ii[7];
tmp2 = (UInt)tmp3 >> 24;
sad = SUB_SAD(sad, tmp, tmp2);
tmp = ii[6];
tmp2 = (tmp3 >> 16) & mask;
sad = SUB_SAD(sad, tmp, tmp2);
tmp = ii[5];
tmp2 = (tmp3 >> 8) & mask;
sad = SUB_SAD(sad, tmp, tmp2);
tmp = ii[4];
tmp2 = tmp3 & mask;
sad = SUB_SAD(sad, tmp, tmp2);
tmp3 = *kk;
kk += (width >> 2);
tmp = ii[3];
tmp2 = (UInt)tmp3 >> 24;
sad = SUB_SAD(sad, tmp, tmp2);
tmp = ii[2];
tmp2 = (tmp3 >> 16) & mask;
sad = SUB_SAD(sad, tmp, tmp2);
tmp = ii[1];
tmp2 = (tmp3 >> 8) & mask;
sad = SUB_SAD(sad, tmp, tmp2);
tmp = *ii;
ii += lx;
tmp2 = tmp3 & mask;
sad = SUB_SAD(sad, tmp, tmp2);
NUM_SAD_BLK();
if (sad > dmin)
return sad;
}
return sad;
}
#endif /* NO_INTER4V */
#ifdef __cplusplus
}
#endif

View File

@ -0,0 +1,855 @@
/* ------------------------------------------------------------------
* Copyright (C) 1998-2009 PacketVideo
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied.
* See the License for the specific language governing permissions
* and limitations under the License.
* -------------------------------------------------------------------
*/
/* contains
Int HalfPel1_SAD_MB(UChar *ref,UChar *blk,Int dmin,Int width,Int ih,Int jh)
Int HalfPel2_SAD_MB(UChar *ref,UChar *blk,Int dmin,Int width)
Int HalfPel1_SAD_Blk(UChar *ref,UChar *blk,Int dmin,Int width,Int ih,Int jh)
Int HalfPel2_SAD_Blk(UChar *ref,UChar *blk,Int dmin,Int width)
Int SAD_MB_HalfPel_C(UChar *ref,UChar *blk,Int dmin,Int width,Int rx,Int xh,Int yh,void *extra_info)
Int SAD_MB_HP_HTFM_Collect(UChar *ref,UChar *blk,Int dmin,Int width,Int rx,Int xh,Int yh,void *extra_info)
Int SAD_MB_HP_HTFM(UChar *ref,UChar *blk,Int dmin,Int width,Int rx,Int xh,Int yh,void *extra_info)
Int SAD_Blk_HalfPel_C(UChar *ref,UChar *blk,Int dmin,Int width,Int rx,Int xh,Int yh,void *extra_info)
*/
//#include <stdlib.h> /* for RAND_MAX */
#include "mp4def.h"
#include "mp4lib_int.h"
#include "sad_halfpel_inline.h"
#ifdef _SAD_STAT
ULong num_sad_HP_MB = 0;
ULong num_sad_HP_Blk = 0;
ULong num_sad_HP_MB_call = 0;
ULong num_sad_HP_Blk_call = 0;
#define NUM_SAD_HP_MB_CALL() num_sad_HP_MB_call++
#define NUM_SAD_HP_MB() num_sad_HP_MB++
#define NUM_SAD_HP_BLK_CALL() num_sad_HP_Blk_call++
#define NUM_SAD_HP_BLK() num_sad_HP_Blk++
#else
#define NUM_SAD_HP_MB_CALL()
#define NUM_SAD_HP_MB()
#define NUM_SAD_HP_BLK_CALL()
#define NUM_SAD_HP_BLK()
#endif
#ifdef __cplusplus
extern "C"
{
#endif
/*==================================================================
Function: HalfPel1_SAD_MB
Date: 03/27/2001
Purpose: Compute SAD 16x16 between blk and ref in halfpel
resolution,
Changes:
==================================================================*/
/* One component is half-pel */
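/* Note: the one-component case interpolates with the rounded 2-tap average
   ((a + b + 1) >> 1); the two-component (diagonal) case in HalfPel2_SAD_MB below uses
   the rounded 4-tap bilinear average ((a + b + c + d + 2) >> 2). */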
Int HalfPel1_SAD_MB(UChar *ref, UChar *blk, Int dmin, Int width, Int ih, Int jh)
{
Int i, j;
Int sad = 0;
UChar *kk, *p1, *p2;
Int temp;
OSCL_UNUSED_ARG(jh);
p1 = ref;
if (ih) p2 = ref + 1;
else p2 = ref + width;
kk = blk;
for (i = 0; i < 16; i++)
{
for (j = 0; j < 16; j++)
{
temp = ((p1[j] + p2[j] + 1) >> 1) - *kk++;
sad += PV_ABS(temp);
}
if (sad > dmin)
return sad;
p1 += width;
p2 += width;
}
return sad;
}
/* Two components need half-pel */
Int HalfPel2_SAD_MB(UChar *ref, UChar *blk, Int dmin, Int width)
{
Int i, j;
Int sad = 0;
UChar *kk, *p1, *p2, *p3, *p4;
Int temp;
p1 = ref;
p2 = ref + 1;
p3 = ref + width;
p4 = ref + width + 1;
kk = blk;
for (i = 0; i < 16; i++)
{
for (j = 0; j < 16; j++)
{
temp = ((p1[j] + p2[j] + p3[j] + p4[j] + 2) >> 2) - *kk++;
sad += PV_ABS(temp);
}
if (sad > dmin)
return sad;
p1 += width;
p3 += width;
p2 += width;
p4 += width;
}
return sad;
}
#ifndef NO_INTER4V
/*==================================================================
Function: HalfPel1_SAD_Blk
Date: 03/27/2001
Purpose: Compute SAD 8x8 between blk and ref in halfpel
resolution.
Changes:
==================================================================*/
/* One component needs half-pel */
Int HalfPel1_SAD_Blk(UChar *ref, UChar *blk, Int dmin, Int width, Int ih, Int jh)
{
Int i, j;
Int sad = 0;
UChar *kk, *p1, *p2;
Int temp;
OSCL_UNUSED_ARG(jh);
p1 = ref;
if (ih) p2 = ref + 1;
else p2 = ref + width;
kk = blk;
for (i = 0; i < 8; i++)
{
for (j = 0; j < 8; j++)
{
temp = ((p1[j] + p2[j] + 1) >> 1) - *kk++;
sad += PV_ABS(temp);
}
if (sad > dmin)
return sad;
p1 += width;
p2 += width;
kk += 8;
}
return sad;
}
/* Two components need half-pel */
Int HalfPel2_SAD_Blk(UChar *ref, UChar *blk, Int dmin, Int width)
{
Int i, j;
Int sad = 0;
UChar *kk, *p1, *p2, *p3, *p4;
Int temp;
p1 = ref;
p2 = ref + 1;
p3 = ref + width;
p4 = ref + width + 1;
kk = blk;
for (i = 0; i < 8; i++)
{
for (j = 0; j < 8; j++)
{
temp = ((p1[j] + p2[j] + p3[j] + p4[j] + 2) >> 2) - *kk++;
sad += PV_ABS(temp);
}
if (sad > dmin)
return sad;
p1 += width;
p3 += width;
p2 += width;
p4 += width;
kk += 8;
}
return sad;
}
#endif // NO_INTER4V
/*===============================================================
Function: SAD_MB_HalfPel
Date: 09/17/2000
Purpose: Compute the SAD on the half-pel resolution
Input/Output: hmem is assumed to be a pointer to the starting
point of the search in the 33x33 matrix search region
Changes:
11/7/00: implemented MMX
===============================================================*/
/*==================================================================
Function: SAD_MB_HalfPel_C
Date: 04/30/2001
Purpose: Compute SAD 16x16 between blk and ref in halfpel
resolution,
Changes:
==================================================================*/
/* Both components are half-pel */
Int SAD_MB_HalfPel_Cxhyh(UChar *ref, UChar *blk, Int dmin_rx, void *extra_info)
{
Int i, j;
Int sad = 0;
UChar *kk, *p1, *p2, *p3, *p4;
// Int sumref=0;
Int temp;
Int rx = dmin_rx & 0xFFFF;
OSCL_UNUSED_ARG(extra_info);
NUM_SAD_HP_MB_CALL();
p1 = ref;
p2 = ref + 1;
p3 = ref + rx;
p4 = ref + rx + 1;
kk = blk;
for (i = 0; i < 16; i++)
{
for (j = 0; j < 16; j++)
{
temp = ((p1[j] + p2[j] + p3[j] + p4[j] + 2) >> 2) - *kk++;
sad += PV_ABS(temp);
}
NUM_SAD_HP_MB();
if (sad > (Int)((ULong)dmin_rx >> 16))
return sad;
p1 += rx;
p3 += rx;
p2 += rx;
p4 += rx;
}
return sad;
}
Int SAD_MB_HalfPel_Cyh(UChar *ref, UChar *blk, Int dmin_rx, void *extra_info)
{
Int i, j;
Int sad = 0;
UChar *kk, *p1, *p2;
// Int sumref=0;
Int temp;
Int rx = dmin_rx & 0xFFFF;
OSCL_UNUSED_ARG(extra_info);
NUM_SAD_HP_MB_CALL();
p1 = ref;
p2 = ref + rx; /* either left/right or top/bottom pixel */
kk = blk;
for (i = 0; i < 16; i++)
{
for (j = 0; j < 16; j++)
{
temp = ((p1[j] + p2[j] + 1) >> 1) - *kk++;
sad += PV_ABS(temp);
}
NUM_SAD_HP_MB();
if (sad > (Int)((ULong)dmin_rx >> 16))
return sad;
p1 += rx;
p2 += rx;
}
return sad;
}
Int SAD_MB_HalfPel_Cxh(UChar *ref, UChar *blk, Int dmin_rx, void *extra_info)
{
Int i, j;
Int sad = 0;
UChar *kk, *p1;
// Int sumref=0;
Int temp;
Int rx = dmin_rx & 0xFFFF;
OSCL_UNUSED_ARG(extra_info);
NUM_SAD_HP_MB_CALL();
p1 = ref;
kk = blk;
for (i = 0; i < 16; i++)
{
for (j = 0; j < 16; j++)
{
temp = ((p1[j] + p1[j+1] + 1) >> 1) - *kk++;
sad += PV_ABS(temp);
}
NUM_SAD_HP_MB();
if (sad > (Int)((ULong)dmin_rx >> 16))
return sad;
p1 += rx;
}
return sad;
}
#ifdef HTFM /* HTFM with uniform subsampling implementation, 2/28/01 */
// Check here
Int SAD_MB_HP_HTFM_Collectxhyh(UChar *ref, UChar *blk, Int dmin_rx, void *extra_info)
{
Int i, j;
Int sad = 0;
UChar *p1, *p2;
Int rx = dmin_rx & 0xFFFF;
Int refwx4 = rx << 2;
Int saddata[16]; /* used when collecting flag (global) is on */
Int difmad, tmp, tmp2;
HTFM_Stat *htfm_stat = (HTFM_Stat*) extra_info;
Int *abs_dif_mad_avg = &(htfm_stat->abs_dif_mad_avg);
UInt *countbreak = &(htfm_stat->countbreak);
Int *offsetRef = htfm_stat->offsetRef;
ULong cur_word;
NUM_SAD_HP_MB_CALL();
blk -= 4;
for (i = 0; i < 16; i++) /* 16 stages */
{
p1 = ref + offsetRef[i];
p2 = p1 + rx;
j = 4;/* 4 lines */
do
{
cur_word = *((ULong*)(blk += 4));
tmp = p1[12] + p2[12];
tmp2 = p1[13] + p2[13];
tmp += tmp2;
tmp2 = (cur_word >> 24) & 0xFF;
tmp += 2;
sad = INTERP2_SUB_SAD(sad, tmp, tmp2);;
tmp = p1[8] + p2[8];
tmp2 = p1[9] + p2[9];
tmp += tmp2;
tmp2 = (cur_word >> 16) & 0xFF;
tmp += 2;
sad = INTERP2_SUB_SAD(sad, tmp, tmp2);;
tmp = p1[4] + p2[4];
tmp2 = p1[5] + p2[5];
tmp += tmp2;
tmp2 = (cur_word >> 8) & 0xFF;
tmp += 2;
sad = INTERP2_SUB_SAD(sad, tmp, tmp2);;
tmp2 = p1[1] + p2[1];
tmp = p1[0] + p2[0];
p1 += refwx4;
p2 += refwx4;
tmp += tmp2;
tmp2 = (cur_word & 0xFF);
tmp += 2;
sad = INTERP2_SUB_SAD(sad, tmp, tmp2);;
}
while (--j);
NUM_SAD_HP_MB();
saddata[i] = sad;
if (i > 0)
{
if (sad > (Int)((ULong)dmin_rx >> 16))
{
difmad = saddata[0] - ((saddata[1] + 1) >> 1);
(*abs_dif_mad_avg) += ((difmad > 0) ? difmad : -difmad);
(*countbreak)++;
return sad;
}
}
}
difmad = saddata[0] - ((saddata[1] + 1) >> 1);
(*abs_dif_mad_avg) += ((difmad > 0) ? difmad : -difmad);
(*countbreak)++;
return sad;
}
Int SAD_MB_HP_HTFM_Collectyh(UChar *ref, UChar *blk, Int dmin_rx, void *extra_info)
{
Int i, j;
Int sad = 0;
UChar *p1, *p2;
Int rx = dmin_rx & 0xFFFF;
Int refwx4 = rx << 2;
Int saddata[16]; /* used when collecting flag (global) is on */
Int difmad, tmp, tmp2;
HTFM_Stat *htfm_stat = (HTFM_Stat*) extra_info;
Int *abs_dif_mad_avg = &(htfm_stat->abs_dif_mad_avg);
UInt *countbreak = &(htfm_stat->countbreak);
Int *offsetRef = htfm_stat->offsetRef;
ULong cur_word;
NUM_SAD_HP_MB_CALL();
blk -= 4;
for (i = 0; i < 16; i++) /* 16 stages */
{
p1 = ref + offsetRef[i];
p2 = p1 + rx;
j = 4;
do
{
cur_word = *((ULong*)(blk += 4));
tmp = p1[12];
tmp2 = p2[12];
tmp++;
tmp2 += tmp;
tmp = (cur_word >> 24) & 0xFF;
sad = INTERP1_SUB_SAD(sad, tmp, tmp2);;
tmp = p1[8];
tmp2 = p2[8];
tmp++;
tmp2 += tmp;
tmp = (cur_word >> 16) & 0xFF;
sad = INTERP1_SUB_SAD(sad, tmp, tmp2);;
tmp = p1[4];
tmp2 = p2[4];
tmp++;
tmp2 += tmp;
tmp = (cur_word >> 8) & 0xFF;
sad = INTERP1_SUB_SAD(sad, tmp, tmp2);;
tmp = p1[0];
p1 += refwx4;
tmp2 = p2[0];
p2 += refwx4;
tmp++;
tmp2 += tmp;
tmp = (cur_word & 0xFF);
sad = INTERP1_SUB_SAD(sad, tmp, tmp2);;
}
while (--j);
NUM_SAD_HP_MB();
saddata[i] = sad;
if (i > 0)
{
if (sad > (Int)((ULong)dmin_rx >> 16))
{
difmad = saddata[0] - ((saddata[1] + 1) >> 1);
(*abs_dif_mad_avg) += ((difmad > 0) ? difmad : -difmad);
(*countbreak)++;
return sad;
}
}
}
difmad = saddata[0] - ((saddata[1] + 1) >> 1);
(*abs_dif_mad_avg) += ((difmad > 0) ? difmad : -difmad);
(*countbreak)++;
return sad;
}
Int SAD_MB_HP_HTFM_Collectxh(UChar *ref, UChar *blk, Int dmin_rx, void *extra_info)
{
Int i, j;
Int sad = 0;
UChar *p1;
Int rx = dmin_rx & 0xFFFF;
Int refwx4 = rx << 2;
Int saddata[16]; /* used when collecting flag (global) is on */
Int difmad, tmp, tmp2;
HTFM_Stat *htfm_stat = (HTFM_Stat*) extra_info;
Int *abs_dif_mad_avg = &(htfm_stat->abs_dif_mad_avg);
UInt *countbreak = &(htfm_stat->countbreak);
Int *offsetRef = htfm_stat->offsetRef;
ULong cur_word;
NUM_SAD_HP_MB_CALL();
blk -= 4;
for (i = 0; i < 16; i++) /* 16 stages */
{
p1 = ref + offsetRef[i];
j = 4; /* 4 lines */
do
{
cur_word = *((ULong*)(blk += 4));
tmp = p1[12];
tmp2 = p1[13];
tmp++;
tmp2 += tmp;
tmp = (cur_word >> 24) & 0xFF;
sad = INTERP1_SUB_SAD(sad, tmp, tmp2);;
tmp = p1[8];
tmp2 = p1[9];
tmp++;
tmp2 += tmp;
tmp = (cur_word >> 16) & 0xFF;
sad = INTERP1_SUB_SAD(sad, tmp, tmp2);;
tmp = p1[4];
tmp2 = p1[5];
tmp++;
tmp2 += tmp;
tmp = (cur_word >> 8) & 0xFF;
sad = INTERP1_SUB_SAD(sad, tmp, tmp2);;
tmp = p1[0];
tmp2 = p1[1];
p1 += refwx4;
tmp++;
tmp2 += tmp;
tmp = (cur_word & 0xFF);
sad = INTERP1_SUB_SAD(sad, tmp, tmp2);;
}
while (--j);
NUM_SAD_HP_MB();
saddata[i] = sad;
if (i > 0)
{
if (sad > (Int)((ULong)dmin_rx >> 16))
{
difmad = saddata[0] - ((saddata[1] + 1) >> 1);
(*abs_dif_mad_avg) += ((difmad > 0) ? difmad : -difmad);
(*countbreak)++;
return sad;
}
}
}
difmad = saddata[0] - ((saddata[1] + 1) >> 1);
(*abs_dif_mad_avg) += ((difmad > 0) ? difmad : -difmad);
(*countbreak)++;
return sad;
}
Int SAD_MB_HP_HTFMxhyh(UChar *ref, UChar *blk, Int dmin_rx, void *extra_info)
{
Int i, j;
Int sad = 0, tmp, tmp2;
UChar *p1, *p2;
Int rx = dmin_rx & 0xFFFF;
Int refwx4 = rx << 2;
Int sadstar = 0, madstar;
Int *nrmlz_th = (Int*) extra_info;
Int *offsetRef = nrmlz_th + 32;
ULong cur_word;
madstar = (ULong)dmin_rx >> 20;
NUM_SAD_HP_MB_CALL();
blk -= 4;
for (i = 0; i < 16; i++) /* 16 stages */
{
p1 = ref + offsetRef[i];
p2 = p1 + rx;
j = 4; /* 4 lines */
do
{
cur_word = *((ULong*)(blk += 4));
tmp = p1[12] + p2[12];
tmp2 = p1[13] + p2[13];
tmp += tmp2;
tmp2 = (cur_word >> 24) & 0xFF;
tmp += 2;
sad = INTERP2_SUB_SAD(sad, tmp, tmp2);;
tmp = p1[8] + p2[8];
tmp2 = p1[9] + p2[9];
tmp += tmp2;
tmp2 = (cur_word >> 16) & 0xFF;
tmp += 2;
sad = INTERP2_SUB_SAD(sad, tmp, tmp2);;
tmp = p1[4] + p2[4];
tmp2 = p1[5] + p2[5];
tmp += tmp2;
tmp2 = (cur_word >> 8) & 0xFF;
tmp += 2;
sad = INTERP2_SUB_SAD(sad, tmp, tmp2);;
tmp2 = p1[1] + p2[1];
tmp = p1[0] + p2[0];
p1 += refwx4;
p2 += refwx4;
tmp += tmp2;
tmp2 = (cur_word & 0xFF);
tmp += 2;
sad = INTERP2_SUB_SAD(sad, tmp, tmp2);;
}
while (--j);
NUM_SAD_HP_MB();
sadstar += madstar;
if (sad > sadstar - nrmlz_th[i] || sad > (Int)((ULong)dmin_rx >> 16))
{
return 65536;
}
}
return sad;
}
Int SAD_MB_HP_HTFMyh(UChar *ref, UChar *blk, Int dmin_rx, void *extra_info)
{
Int i, j;
Int sad = 0, tmp, tmp2;
UChar *p1, *p2;
Int rx = dmin_rx & 0xFFFF;
Int refwx4 = rx << 2;
Int sadstar = 0, madstar;
Int *nrmlz_th = (Int*) extra_info;
Int *offsetRef = nrmlz_th + 32;
ULong cur_word;
madstar = (ULong)dmin_rx >> 20;
NUM_SAD_HP_MB_CALL();
blk -= 4;
for (i = 0; i < 16; i++) /* 16 stages */
{
p1 = ref + offsetRef[i];
p2 = p1 + rx;
j = 4;
do
{
cur_word = *((ULong*)(blk += 4));
tmp = p1[12];
tmp2 = p2[12];
tmp++;
tmp2 += tmp;
tmp = (cur_word >> 24) & 0xFF;
sad = INTERP1_SUB_SAD(sad, tmp, tmp2);;
tmp = p1[8];
tmp2 = p2[8];
tmp++;
tmp2 += tmp;
tmp = (cur_word >> 16) & 0xFF;
sad = INTERP1_SUB_SAD(sad, tmp, tmp2);;
tmp = p1[4];
tmp2 = p2[4];
tmp++;
tmp2 += tmp;
tmp = (cur_word >> 8) & 0xFF;
sad = INTERP1_SUB_SAD(sad, tmp, tmp2);;
tmp = p1[0];
p1 += refwx4;
tmp2 = p2[0];
p2 += refwx4;
tmp++;
tmp2 += tmp;
tmp = (cur_word & 0xFF);
sad = INTERP1_SUB_SAD(sad, tmp, tmp2);;
}
while (--j);
NUM_SAD_HP_MB();
sadstar += madstar;
if (sad > sadstar - nrmlz_th[i] || sad > (Int)((ULong)dmin_rx >> 16))
{
return 65536;
}
}
return sad;
}
Int SAD_MB_HP_HTFMxh(UChar *ref, UChar *blk, Int dmin_rx, void *extra_info)
{
Int i, j;
Int sad = 0, tmp, tmp2;
UChar *p1;
Int rx = dmin_rx & 0xFFFF;
Int refwx4 = rx << 2;
Int sadstar = 0, madstar;
Int *nrmlz_th = (Int*) extra_info;
Int *offsetRef = nrmlz_th + 32;
ULong cur_word;
madstar = (ULong)dmin_rx >> 20;
NUM_SAD_HP_MB_CALL();
blk -= 4;
for (i = 0; i < 16; i++) /* 16 stages */
{
p1 = ref + offsetRef[i];
j = 4;/* 4 lines */
do
{
cur_word = *((ULong*)(blk += 4));
tmp = p1[12];
tmp2 = p1[13];
tmp++;
tmp2 += tmp;
tmp = (cur_word >> 24) & 0xFF;
sad = INTERP1_SUB_SAD(sad, tmp, tmp2);;
tmp = p1[8];
tmp2 = p1[9];
tmp++;
tmp2 += tmp;
tmp = (cur_word >> 16) & 0xFF;
sad = INTERP1_SUB_SAD(sad, tmp, tmp2);;
tmp = p1[4];
tmp2 = p1[5];
tmp++;
tmp2 += tmp;
tmp = (cur_word >> 8) & 0xFF;
sad = INTERP1_SUB_SAD(sad, tmp, tmp2);;
tmp = p1[0];
tmp2 = p1[1];
p1 += refwx4;
tmp++;
tmp2 += tmp;
tmp = (cur_word & 0xFF);
sad = INTERP1_SUB_SAD(sad, tmp, tmp2);;
}
while (--j);
NUM_SAD_HP_MB();
sadstar += madstar;
if (sad > sadstar - nrmlz_th[i] || sad > (Int)((ULong)dmin_rx >> 16))
{
return 65536;
}
}
return sad;
}
#endif /* HTFM */
#ifndef NO_INTER4V
/*==================================================================
Function: SAD_Blk_HalfPel_C
Date: 04/30/2001
Purpose: Compute SAD 8x8 between blk and ref in halfpel
resolution,
Changes:
==================================================================*/
/* xh/yh select which components are half-pel */
Int SAD_Blk_HalfPel_C(UChar *ref, UChar *blk, Int dmin, Int width, Int rx, Int xh, Int yh, void *extra_info)
{
Int i, j;
Int sad = 0;
UChar *kk, *p1, *p2, *p3, *p4;
Int temp;
OSCL_UNUSED_ARG(extra_info);
NUM_SAD_HP_BLK_CALL();
if (xh && yh)
{
p1 = ref;
p2 = ref + xh;
p3 = ref + yh * rx;
p4 = ref + yh * rx + xh;
kk = blk;
for (i = 0; i < 8; i++)
{
for (j = 0; j < 8; j++)
{
temp = ((p1[j] + p2[j] + p3[j] + p4[j] + 2) >> 2) - kk[j];
sad += PV_ABS(temp);
}
NUM_SAD_HP_BLK();
if (sad > dmin)
return sad;
p1 += rx;
p3 += rx;
p2 += rx;
p4 += rx;
kk += width;
}
return sad;
}
else
{
p1 = ref;
p2 = ref + xh + yh * rx; /* either left/right or top/bottom pixel */
kk = blk;
for (i = 0; i < 8; i++)
{
for (j = 0; j < 8; j++)
{
temp = ((p1[j] + p2[j] + 1) >> 1) - kk[j];
sad += PV_ABS(temp);
}
NUM_SAD_HP_BLK();
if (sad > dmin)
return sad;
p1 += rx;
p2 += rx;
kk += width;
}
return sad;
}
}
#endif /* NO_INTER4V */
#ifdef __cplusplus
}
#endif
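Note on the routines above: each half-pel SAD variant interpolates the reference on the fly (a two- or four-tap rounded average), accumulates absolute differences against the current block, and returns early once the partial SAD can no longer beat the best candidate so far. A minimal portable sketch of that pattern, with illustrative names only:

/* Editorial sketch: 8x8 SAD against a half-pel interpolated reference,
 * with early termination once the running SAD exceeds dmin.
 * xh/yh select horizontal/vertical half-pel offsets (0 or 1). */
static int sad8x8_halfpel_sketch(const unsigned char *ref, const unsigned char *cur,
                                 int dmin, int cur_stride, int ref_stride,
                                 int xh, int yh)
{
    int sad = 0;
    for (int i = 0; i < 8; i++) {
        for (int j = 0; j < 8; j++) {
            int p;
            if (xh && yh)       /* both components half-pel: 4-tap rounded average */
                p = (ref[j] + ref[j + 1] + ref[j + ref_stride] + ref[j + ref_stride + 1] + 2) >> 2;
            else if (xh || yh)  /* one component half-pel: 2-tap rounded average */
                p = (ref[j] + ref[j + xh + yh * ref_stride] + 1) >> 1;
            else                /* integer-pel */
                p = ref[j];
            int d = p - cur[j];
            sad += (d >= 0) ? d : -d;
        }
        if (sad > dmin)         /* cannot improve on the current best match */
            return sad;
        ref += ref_stride;
        cur += cur_stride;
    }
    return sad;
}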

View File

@ -0,0 +1,130 @@
/* ------------------------------------------------------------------
* Copyright (C) 1998-2009 PacketVideo
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied.
* See the License for the specific language governing permissions
* and limitations under the License.
* -------------------------------------------------------------------
*/
/*********************************************************************************/
/* Filename: sad_halfpel_inline.h */
/* Description: Implementation for in-line functions used in sad_halfpel.cpp */
/* Modified: */
/*********************************************************************************/
#ifndef _SAD_HALFPEL_INLINE_H_
#define _SAD_HALFPEL_INLINE_H_
#ifdef __cplusplus
extern "C"
{
#endif
#if !defined(PV_ARM_GCC_V5) && !defined(PV_ARM_GCC_V4) /* generic C version */
__inline int32 INTERP1_SUB_SAD(int32 sad, int32 tmp, int32 tmp2)
{
tmp = (tmp2 >> 1) - tmp;
if (tmp > 0) sad += tmp;
else sad -= tmp;
return sad;
}
__inline int32 INTERP2_SUB_SAD(int32 sad, int32 tmp, int32 tmp2)
{
tmp = (tmp >> 2) - tmp2;
if (tmp > 0) sad += tmp;
else sad -= tmp;
return sad;
}
#elif defined(__CC_ARM) /* only work with arm v5 */
__inline int32 INTERP1_SUB_SAD(int32 sad, int32 tmp, int32 tmp2)
{
__asm
{
rsbs tmp, tmp, tmp2, asr #1 ;
rsbmi tmp, tmp, #0 ;
add sad, sad, tmp ;
}
return sad;
}
__inline int32 INTERP2_SUB_SAD(int32 sad, int32 tmp, int32 tmp2)
{
__asm
{
rsbs tmp, tmp2, tmp, asr #2 ;
rsbmi tmp, tmp, #0 ;
add sad, sad, tmp ;
}
return sad;
}
#elif ( defined(PV_ARM_GCC_V5) || defined(PV_ARM_GCC_V4) ) /* ARM GNU COMPILER */
__inline int32 INTERP1_SUB_SAD(int32 sad, int32 tmp, int32 tmp2)
{
register int32 out;
register int32 temp1;
register int32 ss = sad;
register int32 tt = tmp;
register int32 uu = tmp2;
asm volatile("rsbs %1, %3, %4, asr #1\n\t"
"rsbmi %1, %1, #0\n\t"
"add %0, %2, %1"
: "=&r"(out),
"=&r"(temp1)
: "r"(ss),
"r"(tt),
"r"(uu));
return out;
}
__inline int32 INTERP2_SUB_SAD(int32 sad, int32 tmp, int32 tmp2)
{
register int32 out;
register int32 temp1;
register int32 ss = sad;
register int32 tt = tmp;
register int32 uu = tmp2;
asm volatile("rsbs %1, %4, %3, asr #2\n\t"
"rsbmi %1, %1, #0\n\t"
"add %0, %2, %1"
: "=&r"(out),
"=&r"(temp1)
: "r"(ss),
"r"(tt),
"r"(uu));
return out;
}
#endif // compiler selection
#ifdef __cplusplus
}
#endif
#endif //_SAD_HALFPEL_INLINE_H_
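In the generic C branch above, both helpers reduce to shifting a caller-prepared sum (which already includes the rounding constant) and accumulating an absolute difference; the ARM branches compute the same |x| accumulation with RSBS/RSBMI. A self-contained restatement of the contract, with hypothetical names:

#include <stdlib.h>  /* abs */

/* caller passes cur and (p1 + p2 + 1); rounded 2-tap average vs. cur */
static int interp1_sub_sad_ref(int sad, int cur, int two_tap_plus_one)
{
    return sad + abs((two_tap_plus_one >> 1) - cur);
}

/* caller passes (p1a + p1b + p2a + p2b + 2) and cur; rounded 4-tap average vs. cur */
static int interp2_sub_sad_ref(int sad, int four_tap_plus_two, int cur)
{
    return sad + abs((four_tap_plus_two >> 2) - cur);
}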

View File

@ -0,0 +1,539 @@
/* ------------------------------------------------------------------
* Copyright (C) 1998-2009 PacketVideo
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied.
* See the License for the specific language governing permissions
* and limitations under the License.
* -------------------------------------------------------------------
*/
/*********************************************************************************/
/* Filename: sad_inline.h */
/* Description: Implementation for in-line functions used in sad.cpp */
/* Modified: */
/*********************************************************************************/
#ifndef _SAD_INLINE_H_
#define _SAD_INLINE_H_
#ifdef __cplusplus
extern "C"
{
#endif
#if !defined(PV_ARM_GCC_V5) && !defined(PV_ARM_GCC_V4) /* generic C version */
__inline int32 SUB_SAD(int32 sad, int32 tmp, int32 tmp2)
{
tmp = tmp - tmp2;
if (tmp > 0) sad += tmp;
else sad -= tmp;
return sad;
}
__inline int32 sad_4pixel(int32 src1, int32 src2, int32 mask)
{
int32 x7;
x7 = src2 ^ src1; /* check odd/even combination */
if ((uint32)src2 >= (uint32)src1)
{
src1 = src2 - src1; /* subs */
}
else
{
src1 = src1 - src2;
}
x7 = x7 ^ src1; /* only odd bytes need to add carry */
x7 = mask & ((uint32)x7 >> 1);
x7 = (x7 << 8) - x7;
src1 = src1 + (x7 >> 7); /* add 0xFF to the negative byte, add back carry */
src1 = src1 ^(x7 >> 7); /* take absolute value of negative byte */
return src1;
}
#define NUMBER 3
#define SHIFT 24
#include "sad_mb_offset.h"
#undef NUMBER
#define NUMBER 2
#undef SHIFT
#define SHIFT 16
#include "sad_mb_offset.h"
#undef NUMBER
#define NUMBER 1
#undef SHIFT
#define SHIFT 8
#include "sad_mb_offset.h"
__inline int32 simd_sad_mb(UChar *ref, UChar *blk, Int dmin, Int lx)
{
int32 x4, x5, x6, x8, x9, x10, x11, x12, x14;
x9 = 0x80808080; /* const. */
x8 = (uint32)ref & 0x3;
if (x8 == 3)
goto SadMBOffset3;
if (x8 == 2)
goto SadMBOffset2;
if (x8 == 1)
goto SadMBOffset1;
// x5 = (x4<<8)-x4; /* x5 = x4*255; */
x4 = x5 = 0;
x6 = 0xFFFF00FF;
ref -= lx;
blk -= 16;
x8 = 16;
LOOP_SAD0:
/****** process 8 pixels ******/
x10 = *((uint32*)(ref += lx));
x11 = *((uint32*)(ref + 4));
x12 = *((uint32*)(blk += 16));
x14 = *((uint32*)(blk + 4));
/* process x11 & x14 */
x11 = sad_4pixel(x11, x14, x9);
/* process x12 & x10 */
x10 = sad_4pixel(x10, x12, x9);
x5 = x5 + x10; /* accumulate low bytes */
x10 = x10 & (x6 << 8); /* x10 & 0xFF00FF00 */
x4 = x4 + ((uint32)x10 >> 8); /* accumulate high bytes */
x5 = x5 + x11; /* accumulate low bytes */
x11 = x11 & (x6 << 8); /* x11 & 0xFF00FF00 */
x4 = x4 + ((uint32)x11 >> 8); /* accumulate high bytes */
/****** process 8 pixels ******/
x10 = *((uint32*)(ref + 8));
x11 = *((uint32*)(ref + 12));
x12 = *((uint32*)(blk + 8));
x14 = *((uint32*)(blk + 12));
/* process x11 & x14 */
x11 = sad_4pixel(x11, x14, x9);
/* process x12 & x10 */
x10 = sad_4pixel(x10, x12, x9);
x5 = x5 + x10; /* accumulate low bytes */
x10 = x10 & (x6 << 8); /* x10 & 0xFF00FF00 */
x4 = x4 + ((uint32)x10 >> 8); /* accumulate high bytes */
x5 = x5 + x11; /* accumulate low bytes */
x11 = x11 & (x6 << 8); /* x11 & 0xFF00FF00 */
x4 = x4 + ((uint32)x11 >> 8); /* accumulate high bytes */
/****************/
x10 = x5 - (x4 << 8); /* extract low bytes */
x10 = x10 + x4; /* add with high bytes */
x10 = x10 + (x10 << 16); /* add with lower half word */
if (((uint32)x10 >> 16) <= (uint32)dmin) /* compare with dmin */
{
if (--x8)
{
goto LOOP_SAD0;
}
}
return ((uint32)x10 >> 16);
SadMBOffset3:
return sad_mb_offset3(ref, blk, lx, dmin);
SadMBOffset2:
return sad_mb_offset2(ref, blk, lx, dmin);
SadMBOffset1:
return sad_mb_offset1(ref, blk, lx, dmin);
}
#elif defined(__CC_ARM) /* only work with arm v5 */
__inline int32 SUB_SAD(int32 sad, int32 tmp, int32 tmp2)
{
__asm
{
rsbs tmp, tmp, tmp2 ;
rsbmi tmp, tmp, #0 ;
add sad, sad, tmp ;
}
return sad;
}
__inline int32 sad_4pixel(int32 src1, int32 src2, int32 mask)
{
int32 x7;
__asm
{
EOR x7, src2, src1; /* check odd/even combination */
SUBS src1, src2, src1;
EOR x7, x7, src1;
AND x7, mask, x7, lsr #1;
ORRCC x7, x7, #0x80000000;
RSB x7, x7, x7, lsl #8;
ADD src1, src1, x7, asr #7; /* add 0xFF to the negative byte, add back carry */
EOR src1, src1, x7, asr #7; /* take absolute value of negative byte */
}
return src1;
}
__inline int32 sad_4pixelN(int32 src1, int32 src2, int32 mask)
{
int32 x7;
__asm
{
EOR x7, src2, src1; /* check odd/even combination */
ADDS src1, src2, src1;
EOR x7, x7, src1; /* only odd bytes need to add carry */
ANDS x7, mask, x7, rrx;
RSB x7, x7, x7, lsl #8;
SUB src1, src1, x7, asr #7; /* add 0xFF to the negative byte, add back carry */
EOR src1, src1, x7, asr #7; /* take absolute value of negative byte */
}
return src1;
}
#define sum_accumulate __asm{ SBC x5, x5, x10; /* accumulate low bytes */ \
BIC x10, x6, x10; /* x10 & 0xFF00FF00 */ \
ADD x4, x4, x10,lsr #8; /* accumulate high bytes */ \
SBC x5, x5, x11; /* accumulate low bytes */ \
BIC x11, x6, x11; /* x11 & 0xFF00FF00 */ \
ADD x4, x4, x11,lsr #8; } /* accumulate high bytes */
#define NUMBER 3
#define SHIFT 24
#define INC_X8 0x08000001
#include "sad_mb_offset.h"
#undef NUMBER
#define NUMBER 2
#undef SHIFT
#define SHIFT 16
#undef INC_X8
#define INC_X8 0x10000001
#include "sad_mb_offset.h"
#undef NUMBER
#define NUMBER 1
#undef SHIFT
#define SHIFT 8
#undef INC_X8
#define INC_X8 0x08000001
#include "sad_mb_offset.h"
__inline int32 simd_sad_mb(UChar *ref, UChar *blk, Int dmin, Int lx)
{
int32 x4, x5, x6, x8, x9, x10, x11, x12, x14;
x9 = 0x80808080; /* const. */
x4 = x5 = 0;
__asm
{
MOVS x8, ref, lsl #31 ;
BHI SadMBOffset3;
BCS SadMBOffset2;
BMI SadMBOffset1;
MVN x6, #0xFF00;
}
LOOP_SAD0:
/****** process 8 pixels ******/
x11 = *((int32*)(ref + 12));
x10 = *((int32*)(ref + 8));
x14 = *((int32*)(blk + 12));
x12 = *((int32*)(blk + 8));
/* process x11 & x14 */
x11 = sad_4pixel(x11, x14, x9);
/* process x12 & x10 */
x10 = sad_4pixel(x10, x12, x9);
x5 = x5 + x10; /* accumulate low bytes */
x10 = x10 & (x6 << 8); /* x10 & 0xFF00FF00 */
x4 = x4 + ((uint32)x10 >> 8); /* accumulate high bytes */
x5 = x5 + x11; /* accumulate low bytes */
x11 = x11 & (x6 << 8); /* x11 & 0xFF00FF00 */
x4 = x4 + ((uint32)x11 >> 8); /* accumulate high bytes */
__asm
{
/****** process 8 pixels ******/
LDR x11, [ref, #4];
LDR x10, [ref], lx ;
LDR x14, [blk, #4];
LDR x12, [blk], #16 ;
}
/* process x11 & x14 */
x11 = sad_4pixel(x11, x14, x9);
/* process x12 & x10 */
x10 = sad_4pixel(x10, x12, x9);
x5 = x5 + x10; /* accumulate low bytes */
x10 = x10 & (x6 << 8); /* x10 & 0xFF00FF00 */
x4 = x4 + ((uint32)x10 >> 8); /* accumulate high bytes */
x5 = x5 + x11; /* accumulate low bytes */
x11 = x11 & (x6 << 8); /* x11 & 0xFF00FF00 */
x4 = x4 + ((uint32)x11 >> 8); /* accumulate high bytes */
/****************/
x10 = x5 - (x4 << 8); /* extract low bytes */
x10 = x10 + x4; /* add with high bytes */
x10 = x10 + (x10 << 16); /* add with lower half word */
__asm
{
/****************/
RSBS x11, dmin, x10, lsr #16;
ADDLSS x8, x8, #0x10000001;
BLS LOOP_SAD0;
}
return ((uint32)x10 >> 16);
SadMBOffset3:
return sad_mb_offset3(ref, blk, lx, dmin, x8);
SadMBOffset2:
return sad_mb_offset2(ref, blk, lx, dmin, x8);
SadMBOffset1:
return sad_mb_offset1(ref, blk, lx, dmin, x8);
}
#elif ( defined(PV_ARM_GCC_V5) || defined(PV_ARM_GCC_V4) ) /* ARM GNU COMPILER */
__inline int32 SUB_SAD(int32 sad, int32 tmp, int32 tmp2)
{
register int32 out;
register int32 temp1;
register int32 ss = sad;
register int32 tt = tmp;
register int32 uu = tmp2;
asm volatile("rsbs %1, %4, %3\n\t"
"rsbmi %1, %1, #0\n\t"
"add %0, %2, %1"
: "=&r"(out),
"=&r"(temp1)
: "r"(ss),
"r"(tt),
"r"(uu));
return out;
}
__inline int32 sad_4pixel(int32 src1, int32 src2, int32 mask)
{
register int32 out;
register int32 temp1;
register int32 s1 = src1;
register int32 s2 = src2;
register int32 mm = mask;
asm volatile("eor %0, %3, %2\n\t"
"subs %1, %3, %2\n\t"
"eor %0, %0, %1\n\t"
"and %0, %4, %0, lsr #1\n\t"
"orrcc %0, %0, #0x80000000\n\t"
"rsb %0, %0, %0, lsl #8\n\t"
"add %1, %1, %0, asr #7\n\t"
"eor %1, %1, %0, asr #7"
: "=&r"(out),
"=&r"(temp1)
: "r"(s1),
"r"(s2),
"r"(mm));
return temp1;
}
__inline int32 sad_4pixelN(int32 src1, int32 src2, int32 mask)
{
register int32 out;
register int32 temp1;
register int32 s1 = src1;
register int32 s2 = src2;
register int32 mm = mask;
asm volatile("eor %1, %3, %2\n\t"
"adds %0, %3, %2\n\t"
"eor %1, %1, %0\n\t"
"ands %1, %4, %1,rrx\n\t"
"rsb %1, %1, %1, lsl #8\n\t"
"sub %0, %0, %1, asr #7\n\t"
"eor %0, %0, %1, asr #7"
: "=&r"(out),
"=&r"(temp1)
: "r"(s1),
"r"(s2),
"r"(mm));
return (out);
}
#define sum_accumulate asm volatile("sbc %0, %0, %1\n\t" \
"bic %1, %4, %1\n\t" \
"add %2, %2, %1, lsr #8\n\t" \
"sbc %0, %0, %3\n\t" \
"bic %3, %4, %3\n\t" \
"add %2, %2, %3, lsr #8" \
:"+r"(x5), "+r"(x10), "+r"(x4), "+r"(x11) \
:"r"(x6));
#define NUMBER 3
#define SHIFT 24
#define INC_X8 0x08000001
#include "sad_mb_offset.h"
#undef NUMBER
#define NUMBER 2
#undef SHIFT
#define SHIFT 16
#undef INC_X8
#define INC_X8 0x10000001
#include "sad_mb_offset.h"
#undef NUMBER
#define NUMBER 1
#undef SHIFT
#define SHIFT 8
#undef INC_X8
#define INC_X8 0x08000001
#include "sad_mb_offset.h"
__inline int32 simd_sad_mb(UChar *ref, UChar *blk, Int dmin, Int lx)
{
int32 x4, x5, x6, x8, x9, x10, x11, x12, x14;
x9 = 0x80808080; /* const. */
x4 = x5 = 0;
x8 = (uint32)ref & 0x3;
if (x8 == 3)
goto SadMBOffset3;
if (x8 == 2)
goto SadMBOffset2;
if (x8 == 1)
goto SadMBOffset1;
asm volatile("mvn %0, #0xFF00": "=r"(x6));
LOOP_SAD0:
/****** process 8 pixels ******/
x11 = *((int32*)(ref + 12));
x10 = *((int32*)(ref + 8));
x14 = *((int32*)(blk + 12));
x12 = *((int32*)(blk + 8));
/* process x11 & x14 */
x11 = sad_4pixel(x11, x14, x9);
/* process x12 & x10 */
x10 = sad_4pixel(x10, x12, x9);
x5 = x5 + x10; /* accumulate low bytes */
x10 = x10 & (x6 << 8); /* x10 & 0xFF00FF00 */
x4 = x4 + ((uint32)x10 >> 8); /* accumulate high bytes */
x5 = x5 + x11; /* accumulate low bytes */
x11 = x11 & (x6 << 8); /* x11 & 0xFF00FF00 */
x4 = x4 + ((uint32)x11 >> 8); /* accumulate high bytes */
asm volatile("ldr %0, [%4, #4]\n\t"
"ldr %1, [%4], %6\n\t"
"ldr %2, [%5, #4]\n\t"
"ldr %3, [%5], #16"
: "=r"(x11), "=r"(x10), "=r"(x14), "=r"(x12), "+r"(ref), "+r"(blk)
: "r"(lx));
/* process x11 & x14 */
x11 = sad_4pixel(x11, x14, x9);
/* process x12 & x10 */
x10 = sad_4pixel(x10, x12, x9);
x5 = x5 + x10; /* accumulate low bytes */
x10 = x10 & (x6 << 8); /* x10 & 0xFF00FF00 */
x4 = x4 + ((uint32)x10 >> 8); /* accumulate high bytes */
x5 = x5 + x11; /* accumulate low bytes */
x11 = x11 & (x6 << 8); /* x11 & 0xFF00FF00 */
x4 = x4 + ((uint32)x11 >> 8); /* accumulate high bytes */
/****************/
x10 = x5 - (x4 << 8); /* extract low bytes */
x10 = x10 + x4; /* add with high bytes */
x10 = x10 + (x10 << 16); /* add with lower half word */
if (((uint32)x10 >> 16) <= (uint32)dmin) /* compare with dmin */
{
if (--x8)
{
goto LOOP_SAD0;
}
}
return ((uint32)x10 >> 16);
SadMBOffset3:
return sad_mb_offset3(ref, blk, lx, dmin);
SadMBOffset2:
return sad_mb_offset2(ref, blk, lx, dmin);
SadMBOffset1:
return sad_mb_offset1(ref, blk, lx, dmin);
}
#endif // compiler selection
#ifdef __cplusplus
}
#endif
#endif // _SAD_INLINE_H_
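sad_4pixel() above is a SIMD-within-a-register trick: it forms the four byte-wise absolute differences of two packed 32-bit words in one pass, and simd_sad_mb() keeps a packed running sum in x5 plus a separate sum of the high bytes in x4 so the per-byte totals can be recovered without inter-byte overflow before the final fold. For reference, a plain scalar version of what one sad_4pixel() call contributes (illustrative only):

#include <stdint.h>

/* Reference for sad_4pixel(): sum of |a_i - b_i| over the four bytes
 * packed in two 32-bit words. The SWAR version keeps the four results
 * packed; here they are folded into a scalar for clarity. */
static uint32_t sad_4pixel_ref(uint32_t a, uint32_t b)
{
    uint32_t sad = 0;
    for (int i = 0; i < 4; i++) {
        int x = (int)((a >> (8 * i)) & 0xFF) - (int)((b >> (8 * i)) & 0xFF);
        sad += (x >= 0) ? (uint32_t)x : (uint32_t)(-x);
    }
    return sad;
}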

View File

@ -0,0 +1,317 @@
/* ------------------------------------------------------------------
* Copyright (C) 1998-2009 PacketVideo
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied.
* See the License for the specific language governing permissions
* and limitations under the License.
* -------------------------------------------------------------------
*/
/*********************************************************************************/
/* Filename: sad_mb_offset.h */
/* Description: Implementation for in-line functions included by sad_inline.h */
/* Modified: */
/*********************************************************************************/
#if !defined(PV_ARM_GCC_V4) && !defined(PV_ARM_GCC_V5) /* generic C version */
#if (NUMBER==3)
__inline int32 sad_mb_offset3(UChar *ref, UChar *blk, Int lx, Int dmin)
#elif (NUMBER==2)
__inline int32 sad_mb_offset2(UChar *ref, UChar *blk, Int lx, Int dmin)
#elif (NUMBER==1)
__inline int32 sad_mb_offset1(UChar *ref, UChar *blk, Int lx, Int dmin)
#endif
{
int32 x4, x5, x6, x8, x9, x10, x11, x12, x14;
// x5 = (x4<<8) - x4;
x4 = x5 = 0;
x6 = 0xFFFF00FF;
x9 = 0x80808080; /* const. */
ref -= NUMBER; /* bic ref, ref, #3 */
ref -= lx;
blk -= 16;
x8 = 16;
#if (NUMBER==3)
LOOP_SAD3:
#elif (NUMBER==2)
LOOP_SAD2:
#elif (NUMBER==1)
LOOP_SAD1:
#endif
/****** process 8 pixels ******/
x10 = *((uint32*)(ref += lx)); /* D C B A */
x11 = *((uint32*)(ref + 4)); /* H G F E */
x12 = *((uint32*)(ref + 8)); /* L K J I */
x10 = ((uint32)x10 >> SHIFT); /* 0 0 0 D */
x10 = x10 | (x11 << (32 - SHIFT)); /* G F E D */
x11 = ((uint32)x11 >> SHIFT); /* 0 0 0 H */
x11 = x11 | (x12 << (32 - SHIFT)); /* K J I H */
x12 = *((uint32*)(blk += 16));
x14 = *((uint32*)(blk + 4));
/* process x11 & x14 */
x11 = sad_4pixel(x11, x14, x9);
/* process x12 & x10 */
x10 = sad_4pixel(x10, x12, x9);
x5 = x5 + x10; /* accumulate low bytes */
x10 = x10 & (x6 << 8); /* x10 & 0xFF00FF00 */
x4 = x4 + ((uint32)x10 >> 8); /* accumulate high bytes */
x5 = x5 + x11; /* accumulate low bytes */
x11 = x11 & (x6 << 8); /* x11 & 0xFF00FF00 */
x4 = x4 + ((uint32)x11 >> 8); /* accumulate high bytes */
/****** process 8 pixels ******/
x10 = *((uint32*)(ref + 8)); /* D C B A */
x11 = *((uint32*)(ref + 12)); /* H G F E */
x12 = *((uint32*)(ref + 16)); /* L K J I */
x10 = ((uint32)x10 >> SHIFT); /* mvn x10, x10, lsr #24 = 0xFF 0xFF 0xFF ~D */
x10 = x10 | (x11 << (32 - SHIFT)); /* bic x10, x10, x11, lsl #8 = ~G ~F ~E ~D */
x11 = ((uint32)x11 >> SHIFT); /* 0xFF 0xFF 0xFF ~H */
x11 = x11 | (x12 << (32 - SHIFT)); /* ~K ~J ~I ~H */
x12 = *((uint32*)(blk + 8));
x14 = *((uint32*)(blk + 12));
/* process x11 & x14 */
x11 = sad_4pixel(x11, x14, x9);
/* process x12 & x10 */
x10 = sad_4pixel(x10, x12, x9);
x5 = x5 + x10; /* accumulate low bytes */
x10 = x10 & (x6 << 8); /* x10 & 0xFF00FF00 */
x4 = x4 + ((uint32)x10 >> 8); /* accumulate high bytes */
x5 = x5 + x11; /* accumulate low bytes */
x11 = x11 & (x6 << 8); /* x11 & 0xFF00FF00 */
x4 = x4 + ((uint32)x11 >> 8); /* accumulate high bytes */
/****************/
x10 = x5 - (x4 << 8); /* extract low bytes */
x10 = x10 + x4; /* add with high bytes */
x10 = x10 + (x10 << 16); /* add with lower half word */
if (((uint32)x10 >> 16) <= (uint32)dmin) /* compare with dmin */
{
if (--x8)
{
#if (NUMBER==3)
goto LOOP_SAD3;
#elif (NUMBER==2)
goto LOOP_SAD2;
#elif (NUMBER==1)
goto LOOP_SAD1;
#endif
}
}
return ((uint32)x10 >> 16);
}
#elif defined(__CC_ARM) /* only work with arm v5 */
#if (NUMBER==3)
__inline int32 sad_mb_offset3(UChar *ref, UChar *blk, Int lx, Int dmin, int32 x8)
#elif (NUMBER==2)
__inline int32 sad_mb_offset2(UChar *ref, UChar *blk, Int lx, Int dmin, int32 x8)
#elif (NUMBER==1)
__inline int32 sad_mb_offset1(UChar *ref, UChar *blk, Int lx, Int dmin, int32 x8)
#endif
{
int32 x4, x5, x6, x9, x10, x11, x12, x14;
x9 = 0x80808080; /* const. */
x4 = x5 = 0;
__asm{
MVN x6, #0xff0000;
BIC ref, ref, #3;
#if (NUMBER==3)
LOOP_SAD3:
#elif (NUMBER==2)
LOOP_SAD2:
#elif (NUMBER==1)
LOOP_SAD1:
#endif
}
/****** process 8 pixels ******/
x11 = *((int32*)(ref + 12));
x12 = *((int32*)(ref + 16));
x10 = *((int32*)(ref + 8));
x14 = *((int32*)(blk + 12));
__asm{
MVN x10, x10, lsr #SHIFT;
BIC x10, x10, x11, lsl #(32-SHIFT);
MVN x11, x11, lsr #SHIFT;
BIC x11, x11, x12, lsl #(32-SHIFT);
LDR x12, [blk, #8];
}
/* process x11 & x14 */
x11 = sad_4pixelN(x11, x14, x9);
/* process x12 & x10 */
x10 = sad_4pixelN(x10, x12, x9);
sum_accumulate;
__asm{
/****** process 8 pixels ******/
LDR x11, [ref, #4];
LDR x12, [ref, #8];
LDR x10, [ref], lx ;
LDR x14, [blk, #4];
MVN x10, x10, lsr #SHIFT;
BIC x10, x10, x11, lsl #(32-SHIFT);
MVN x11, x11, lsr #SHIFT;
BIC x11, x11, x12, lsl #(32-SHIFT);
LDR x12, [blk], #16;
}
/* process x11 & x14 */
x11 = sad_4pixelN(x11, x14, x9);
/* process x12 & x10 */
x10 = sad_4pixelN(x10, x12, x9);
sum_accumulate;
/****************/
x10 = x5 - (x4 << 8); /* extract low bytes */
x10 = x10 + x4; /* add with high bytes */
x10 = x10 + (x10 << 16); /* add with lower half word */
__asm{
RSBS x11, dmin, x10, lsr #16
ADDLSS x8, x8, #INC_X8
#if (NUMBER==3)
BLS LOOP_SAD3;
#elif (NUMBER==2)
BLS LOOP_SAD2;
#elif (NUMBER==1)
BLS LOOP_SAD1;
#endif
}
return ((uint32)x10 >> 16);
}
#elif ( defined(PV_ARM_GCC_V5) || defined(PV_ARM_GCC_V4) ) /* ARM GNU COMPILER */
#if (NUMBER==3)
__inline int32 sad_mb_offset3(UChar *ref, UChar *blk, Int lx, Int dmin)
#elif (NUMBER==2)
__inline int32 sad_mb_offset2(UChar *ref, UChar *blk, Int lx, Int dmin)
#elif (NUMBER==1)
__inline int32 sad_mb_offset1(UChar *ref, UChar *blk, Int lx, Int dmin)
#endif
{
int32 x4, x5, x6, x8, x9, x10, x11, x12, x14;
// x5 = (x4<<8) - x4;
x4 = x5 = 0;
x6 = 0xFFFF00FF;
x9 = 0x80808080; /* const. */
ref -= NUMBER; /* bic ref, ref, #3 */
ref -= lx;
x8 = 16;
#if (NUMBER==3)
LOOP_SAD3:
#elif (NUMBER==2)
LOOP_SAD2:
#elif (NUMBER==1)
LOOP_SAD1:
#endif
/****** process 8 pixels ******/
x10 = *((uint32*)(ref += lx)); /* D C B A */
x11 = *((uint32*)(ref + 4)); /* H G F E */
x12 = *((uint32*)(ref + 8)); /* L K J I */
int32 shift = SHIFT;
int32 shift2 = 32 - SHIFT;
asm volatile("ldr %3, [%4, #4]\n\t"
"mvn %0, %0, lsr %5\n\t"
"bic %0, %0, %1, lsl %6\n\t"
"mvn %1, %1, lsr %5\n\t"
"bic %1, %1, %2, lsl %6\n\t"
"ldr %2, [%4, #8]"
: "+r"(x10), "+r"(x11), "+r"(x12), "=r"(x14)
: "r"(blk), "r"(shift), "r"(shift2));
/* process x11 & x14 */
x11 = sad_4pixel(x11, x14, x9);
/* process x12 & x10 */
x10 = sad_4pixel(x10, x12, x9);
sum_accumulate;
/****** process 8 pixels ******/
x10 = *((uint32*)(ref + 8)); /* D C B A */
x11 = *((uint32*)(ref + 12)); /* H G F E */
x12 = *((uint32*)(ref + 16)); /* L K J I */
asm volatile("ldr %3, [%4, #4]\n\t"
"mvn %0, %0, lsr %5\n\t"
"bic %0, %0, %1, lsl %6\n\t"
"mvn %1, %1, lsr %5\n\t"
"bic %1, %1, %2, lsl %6\n\t"
"ldr %2, [%4, #8]"
: "+r"(x10), "+r"(x11), "+r"(x12), "=r"(x14)
: "r"(blk), "r"(shift), "r"(shift2));
/* process x11 & x14 */
x11 = sad_4pixel(x11, x14, x9);
/* process x12 & x10 */
x10 = sad_4pixel(x10, x12, x9);
sum_accumulate;
/****************/
x10 = x5 - (x4 << 8); /* extract low bytes */
x10 = x10 + x4; /* add with high bytes */
x10 = x10 + (x10 << 16); /* add with lower half word */
if (((uint32)x10 >> 16) <= (uint32)dmin) /* compare with dmin */
{
if (--x8)
{
#if (NUMBER==3)
goto LOOP_SAD3;
#elif (NUMBER==2)
goto LOOP_SAD2;
#elif (NUMBER==1)
goto LOOP_SAD1;
#endif
}
}
return ((uint32)x10 >> 16);
}
#endif
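The three sad_mb_offsetN() variants exist because the reference pointer is generally not word aligned: each one loads aligned words and splices them together with a fixed SHIFT of 8, 16 or 24 bits before running the usual packed SAD. A one-line illustration of that splice (little-endian layout, as assumed by the code above; the helper name is illustrative):

#include <stdint.h>

/* Rebuild 4 consecutive reference bytes starting at byte offset
 * `offset` (1..3) from two aligned 32-bit loads lo and hi. */
static uint32_t splice_unaligned(uint32_t lo, uint32_t hi, int offset)
{
    int shift = 8 * offset;              /* SHIFT in sad_mb_offset.h */
    return (lo >> shift) | (hi << (32 - shift));
}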

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -0,0 +1,42 @@
/* ------------------------------------------------------------------
* Copyright (C) 1998-2009 PacketVideo
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied.
* See the License for the specific language governing permissions
* and limitations under the License.
* -------------------------------------------------------------------
*/
#ifndef _VLC_ENCODE_H_
#define _VLC_ENCODE_H_
#include "mp4def.h"
#include "mp4enc_api.h"
Int PutCoeff_Inter(Int run, Int level, Int last, BitstreamEncVideo *bitstream);
Int PutCoeff_Intra(Int run, Int level, Int last, BitstreamEncVideo *bitstream);
Int PutCBPY(Int cbpy, Char intra, BitstreamEncVideo *bitstream);
Int PutMCBPC_Inter(Int cbpc, Int mode, BitstreamEncVideo *bitstream);
Int PutMCBPC_Intra(Int cbpc, Int mode, BitstreamEncVideo *bitstream);
Int PutMV(Int mvint, BitstreamEncVideo *bitstream);
Int PutDCsize_chrom(Int size, BitstreamEncVideo *bitstream);
Int PutDCsize_lum(Int size, BitstreamEncVideo *bitstream);
Int PutCoeff_Inter_RVLC(Int run, Int level, Int last, BitstreamEncVideo *bitstream);
Int PutCoeff_Intra_RVLC(Int run, Int level, Int last, BitstreamEncVideo *bitstream);
Int PutRunCoeff_Inter(Int run, Int level, Int last, BitstreamEncVideo *bitstream);
Int PutRunCoeff_Intra(Int run, Int level, Int last, BitstreamEncVideo *bitstream);
Int PutLevelCoeff_Inter(Int run, Int level, Int last, BitstreamEncVideo *bitstream);
Int PutLevelCoeff_Intra(Int run, Int level, Int last, BitstreamEncVideo *bitstream);
Void MB_CodeCoeff(VideoEncData *video, BitstreamEncVideo *bs);
Void BlockCodeCoeff(RunLevelBlock *RLB, BitstreamEncVideo *bs, Int j_start, UChar Mode, Int rvlc, Int shortVideoHeader);
#endif /* _VLC_ENCODE_H_ */

View File

@ -0,0 +1,316 @@
/* ------------------------------------------------------------------
* Copyright (C) 1998-2009 PacketVideo
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied.
* See the License for the specific language governing permissions
* and limitations under the License.
* -------------------------------------------------------------------
*/
#ifndef _VLC_ENCODE_INLINE_H_
#define _VLC_ENCODE_INLINE_H_
#if !defined(PV_ARM_GCC_V5) && !defined(PV_ARM_GCC_V4)
__inline Int zero_run_search(UInt *bitmapzz, Short *dataBlock, RunLevelBlock *RLB, Int nc)
{
Int idx, run, level, j;
UInt end, match;
idx = 0;
j = 0;
run = 0;
match = 1 << 31;
if (nc > 32)
end = 1;
else
end = 1 << (32 - nc);
while (match >= end)
{
if ((match&bitmapzz[0]) == 0)
{
run++;
j++;
match >>= 1;
}
else
{
match >>= 1;
level = dataBlock[j];
dataBlock[j] = 0; /* reset output */
j++;
if (level < 0)
{
RLB->level[idx] = -level;
RLB->s[idx] = 1;
RLB->run[idx] = run;
run = 0;
idx++;
}
else
{
RLB->level[idx] = level;
RLB->s[idx] = 0;
RLB->run[idx] = run;
run = 0;
idx++;
}
}
}
nc -= 32;
if (nc > 0)
{
match = 1 << 31;
end = 1 << (32 - nc);
while (match >= end)
{
if ((match&bitmapzz[1]) == 0)
{
run++;
j++;
match >>= 1;
}
else
{
match >>= 1;
level = dataBlock[j];
dataBlock[j] = 0; /* reset output */
j++;
if (level < 0)
{
RLB->level[idx] = -level;
RLB->s[idx] = 1;
RLB->run[idx] = run;
run = 0;
idx++;
}
else
{
RLB->level[idx] = level;
RLB->s[idx] = 0;
RLB->run[idx] = run;
run = 0;
idx++;
}
}
}
}
return idx;
}
#elif defined(__CC_ARM) /* only work with arm v5 */
__inline Int zero_run_search(UInt *bitmapzz, Short *dataBlock, RunLevelBlock *RLB, Int nc)
{
OSCL_UNUSED_ARG(nc);
Int idx, run, level, j;
UInt end, match;
Int zzorder;
idx = 0;
run = 0;
j = -1;
__asm
{
ldr match, [bitmapzz]
clz run, match
}
zzorder = 0;
while (run < 32)
{
__asm
{
mov end, #0x80000000
mov end, end, lsr run /* mask*/
bic match, match, end /* remove it from bitmap */
mov run, run, lsl #1 /* 05/09/02 */
ldrsh level, [dataBlock, run] /* load data */
strh zzorder, [dataBlock, run] /* reset output */
add j, j, #1
rsb run, j, run, lsr #1 /* delta run */
add j, j, run /* current position */
}
if (level < 0)
{
RLB->level[idx] = -level;
RLB->s[idx] = 1;
RLB->run[idx] = run;
run = 0;
idx++;
}
else
{
RLB->level[idx] = level;
RLB->s[idx] = 0;
RLB->run[idx] = run;
run = 0;
idx++;
}
__asm
{
clz run, match
}
}
__asm
{
ldr match, [bitmapzz, #4]
clz run, match
}
while (run < 32)
{
__asm
{
mov end, #0x80000000
mov end, end, lsr run /* mask*/
bic match, match, end /* remove it from bitmap */
add run, run, #32 /* current position */
mov run, run, lsl #1 /* 09/02/05 */
ldrsh level, [dataBlock, run] /* load data */
strh zzorder, [dataBlock, run] /* reset output */
add j, j, #1
rsb run, j, run, lsr #1 /* delta run */
add j, j, run /* current position */
}
if (level < 0)
{
RLB->level[idx] = -level;
RLB->s[idx] = 1;
RLB->run[idx] = run;
run = 0;
idx++;
}
else
{
RLB->level[idx] = level;
RLB->s[idx] = 0;
RLB->run[idx] = run;
run = 0;
idx++;
}
__asm
{
clz run, match
}
}
return idx;
}
#elif ( defined(PV_ARM_GCC_V4) || defined(PV_ARM_GCC_V5) ) /* ARM GNU COMPILER */
__inline Int m4v_enc_clz(UInt temp)
{
register Int rb;
register UInt ra = (UInt)temp;
asm volatile("clz %0, %1"
: "=&r"(rb)
: "r"(ra)
);
return (rb);
}
__inline Int zero_run_search(UInt *bitmapzz, Short *dataBlock, RunLevelBlock *RLB, Int nc)
{
OSCL_UNUSED_ARG(nc);
Int idx, run, level = 0, j;
UInt end = 0, match;
Int zzorder;
idx = 0;
run = 0;
j = -1;
match = *bitmapzz;
run = m4v_enc_clz(match);
zzorder = 0;
while (run < 32)
{
asm volatile("mov %0, #0x80000000\n\t"
"mov %0, %0, lsr %1\n\t"
"bic %2, %2, %0\n\t"
"mov %1, %1, lsl #1\n\t"
"ldrsh %3, [%6, %1]\n\t"
"strh %5, [%6, %1]\n\t"
"add %4, %4, #1\n\t"
"rsb %1, %4, %1, lsr #1\n\t"
"add %4, %4, %1"
: "+r"(end), "+r"(run), "+r"(match), "=r"(level), "+r"(j)
: "r"(zzorder), "r"(dataBlock));
if (level < 0)
{
RLB->level[idx] = -level;
RLB->s[idx] = 1;
RLB->run[idx] = run;
run = 0;
idx++;
}
else
{
RLB->level[idx] = level;
RLB->s[idx] = 0;
RLB->run[idx] = run;
run = 0;
idx++;
}
run = m4v_enc_clz(match);
}
match = bitmapzz[1];
run = m4v_enc_clz(match);
while (run < 32)
{
asm volatile("mov %0, #0x80000000\n\t"
"mov %0, %0, lsr %1\n\t"
"bic %2, %2, %0\n\t"
"add %1, %1, #32\n\t"
"mov %1, %1, lsl #1\n\t"
"ldrsh %3, [%6, %1]\n\t"
"strh %5, [%6, %1]\n\t"
"add %4, %4, #1\n\t"
"rsb %1, %4, %1, lsr #1\n\t"
"add %4, %4, %1"
: "+r"(end), "+r"(run), "+r"(match), "+r"(level), "+r"(j)
: "r"(zzorder), "r"(dataBlock));
if (level < 0)
{
RLB->level[idx] = -level;
RLB->s[idx] = 1;
RLB->run[idx] = run;
run = 0;
idx++;
}
else
{
RLB->level[idx] = level;
RLB->s[idx] = 0;
RLB->run[idx] = run;
run = 0;
idx++;
}
run = m4v_enc_clz(match);
}
return idx;
}
#endif
#endif // _VLC_ENCODE_INLINE_H_
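zero_run_search() turns the zig-zag-ordered coefficients into (run, level, sign) triples, using the 64-bit bitmap of non-zero positions so runs of zeros are skipped with a count-leading-zeros rather than tested one at a time. A portable restatement of the same scan, assuming the bitmapzz layout used above (bit 31 of word 0 is coefficient 0); the struct and names are illustrative, not the encoder's types:

/* Illustrative only: same contract as zero_run_search(), without the CLZ fast path. */
typedef struct { int run, level, sign; } RunLevelSketch;

static int zero_run_search_ref(const unsigned int bitmapzz[2],
                               short *dataBlock, RunLevelSketch *out, int ncoef)
{
    int idx = 0, run = 0;
    for (int j = 0; j < ncoef && j < 64; j++) {
        unsigned int bit = (bitmapzz[j >> 5] >> (31 - (j & 31))) & 1;
        if (!bit) { run++; continue; }
        int level = dataBlock[j];
        dataBlock[j] = 0;                 /* reset output, as in the original */
        out[idx].run   = run;
        out[idx].level = (level < 0) ? -level : level;
        out[idx].sign  = (level < 0);
        idx++;
        run = 0;
    }
    return idx;
}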

View File

@ -0,0 +1,581 @@
/* ------------------------------------------------------------------
* Copyright (C) 1998-2009 PacketVideo
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied.
* See the License for the specific language governing permissions
* and limitations under the License.
* -------------------------------------------------------------------
*/
#include "mp4def.h"
#include "mp4lib_int.h"
#include "mp4enc_lib.h"
#include "bitstream_io.h"
#include "m4venc_oscl.h"
PV_STATUS EncodeShortHeader(BitstreamEncVideo *stream, Vop *currVop);
PV_STATUS EncodeVOPHeader(BitstreamEncVideo *stream, Vol *currVol, Vop *currVop);
PV_STATUS EncodeGOVHeader(BitstreamEncVideo *stream, UInt seconds);
PV_STATUS EncodeVop_BXRC(VideoEncData *video);
PV_STATUS EncodeVop_NoME(VideoEncData *video);
/* ======================================================================== */
/* Function : EncodeVop() */
/* Date : 08/23/2000 */
/* Purpose : Encode one VOP (frame) */
/* In/out : */
/* Return : */
/* Modified : */
/* ======================================================================== */
PV_STATUS EncodeVop(VideoEncData *video)
{
PV_STATUS status;
Int currLayer = video->currLayer;
Vol *currVol = video->vol[currLayer];
Vop *currVop = video->currVop;
// BitstreamEncVideo *stream=video->bitstream1;
UChar *Mode = video->headerInfo.Mode;
rateControl **rc = video->rc;
// UInt time=0;
/*******************/
/* Initialize mode */
/*******************/
switch (currVop->predictionType)
{
case I_VOP:
M4VENC_MEMSET(Mode, MODE_INTRA, sizeof(UChar)*currVol->nTotalMB);
break;
case P_VOP:
M4VENC_MEMSET(Mode, MODE_INTER, sizeof(UChar)*currVol->nTotalMB);
break;
case B_VOP:
/*M4VENC_MEMSET(Mode, MODE_INTER_B,sizeof(UChar)*nTotalMB);*/
return PV_FAIL;
default:
return PV_FAIL;
}
/*********************/
/* Motion Estimation */
/* compute MVs, scene change detection, edge padding, */
/* intra refresh, compute block activity */
/*********************/
MotionEstimation(video); /* do ME for the whole frame */
/***************************/
/* rate Control (assign QP) */
/* 4/11/01, clean-up, and put into a separate function */
/***************************/
status = RC_VopQPSetting(video, rc);
if (status == PV_FAIL)
return PV_FAIL;
/**********************/
/* Encode VOP */
/**********************/
if (video->slice_coding) /* end here */
{
/* initialize state variable for slice-based APIs */
video->totalSAD = 0;
video->mbnum = 0;
video->sliceNo[0] = 0;
video->numIntra = 0;
video->offset = 0;
video->end_of_buf = 0;
video->hp_guess = -1;
return status;
}
status = EncodeVop_NoME(video);
/******************************/
/* rate control (update stat) */
/* 6/2/01 separate function */
/******************************/
RC_VopUpdateStat(video, rc[currLayer]);
return status;
}
/* ======================================================================== */
/* Function : EncodeVop_NoME() */
/* Date : 08/28/2001 */
/* History : */
/* Purpose : EncodeVop without motion est. */
/* In/out : */
/* Return : */
/* Modified : */
/* */
/* ======================================================================== */
PV_STATUS EncodeVop_NoME(VideoEncData *video)
{
Vop *currVop = video->currVop;
Vol *currVol = video->vol[video->currLayer];
BitstreamEncVideo *stream = video->bitstream1;
Int time = 0; /* follows EncodeVop value */
PV_STATUS status = PV_SUCCESS;
if (currVol->shortVideoHeader) /* Short Video Header = 1 */
{
status = EncodeShortHeader(stream, currVop); /* Encode Short Header */
video->header_bits = BitstreamGetPos(stream); /* Header Bits */
status = EncodeFrameCombinedMode(video);
}
#ifndef H263_ONLY
else /* Short Video Header = 0 */
{
if (currVol->GOVStart && currVop->predictionType == I_VOP)
status = EncodeGOVHeader(stream, time); /* Encode GOV Header */
status = EncodeVOPHeader(stream, currVol, currVop); /* Encode VOP Header */
video->header_bits = BitstreamGetPos(stream); /* Header Bits */
if (currVop->vopCoded)
{
if (!currVol->scalability)
{
if (currVol->dataPartitioning)
{
status = EncodeFrameDataPartMode(video); /* Encode Data Partitioning Mode VOP */
}
else
{
status = EncodeFrameCombinedMode(video); /* Encode Combined Mode VOP */
}
}
else
status = EncodeFrameCombinedMode(video); /* Encode Combined Mode VOP */
}
else /* Vop Not coded */
{
return status;
}
}
#endif /* H263_ONLY */
return status;
}
#ifndef NO_SLICE_ENCODE
/* ======================================================================== */
/* Function : EncodeSlice() */
/* Date : 04/19/2002 */
/* History : */
/* Purpose : Encode one slice. */
/* In/out : */
/* Return : */
/* Modified : */
/* */
/* ======================================================================== */
PV_STATUS EncodeSlice(VideoEncData *video)
{
Vop *currVop = video->currVop;
Int currLayer = video->currLayer;
Vol *currVol = video->vol[currLayer];
BitstreamEncVideo *stream = video->bitstream1; /* different from frame-based */
Int time = 0; /* follows EncodeVop value */
PV_STATUS status = PV_SUCCESS;
rateControl **rc = video->rc;
if (currVol->shortVideoHeader) /* Short Video Header = 1 */
{
if (video->mbnum == 0)
{
status = EncodeShortHeader(stream, currVop); /* Encode Short Header */
video->header_bits = BitstreamGetPos(stream); /* Header Bits */
}
status = EncodeSliceCombinedMode(video);
}
#ifndef H263_ONLY
else /* Short Video Header = 0 */
{
if (video->mbnum == 0)
{
if (currVol->GOVStart)
status = EncodeGOVHeader(stream, time); /* Encode GOV Header */
status = EncodeVOPHeader(stream, currVol, currVop); /* Encode VOP Header */
video->header_bits = BitstreamGetPos(stream); /* Header Bits */
}
if (currVop->vopCoded)
{
if (!currVol->scalability)
{
if (currVol->dataPartitioning)
{
status = EncodeSliceDataPartMode(video); /* Encode Data Partitioning Mode VOP */
}
else
{
status = EncodeSliceCombinedMode(video); /* Encode Combined Mode VOP */
}
}
else
status = EncodeSliceCombinedMode(video); /* Encode Combined Mode VOP */
}
else /* Vop Not coded */
{
return status;
}
}
#endif /* H263_ONLY */
if (video->mbnum >= currVol->nTotalMB && status != PV_END_OF_BUF) /* end of Vop */
{
/******************************/
/* rate control (update stat) */
/* 6/2/01 separate function */
/******************************/
status = RC_VopUpdateStat(video, rc[currLayer]);
}
return status;
}
#endif /* NO_SLICE_ENCODE */
#ifndef H263_ONLY
/* ======================================================================== */
/* Function : EncodeGOVHeader() */
/* Date : 08/23/2000 */
/* Purpose : Encode GOV Header */
/* In/out : */
/* Return : */
/* Modified : */
/* ======================================================================== */
PV_STATUS EncodeGOVHeader(BitstreamEncVideo *stream, UInt seconds)
{
PV_STATUS status;
// int temp;
UInt tmpvar;
/********************************/
/* Group_of_VideoObjectPlane() */
/********************************/
status = BitstreamPutGT16Bits(stream, 32, GROUP_START_CODE);
/* time_code */
tmpvar = seconds / 3600;
status = BitstreamPutBits(stream, 5, tmpvar); /* Hours*/
tmpvar = (seconds - tmpvar * 3600) / 60;
status = BitstreamPutBits(stream, 6, tmpvar); /* Minutes*/
status = BitstreamPut1Bits(stream, 1); /* Marker*/
tmpvar = seconds % 60;
status = BitstreamPutBits(stream, 6, tmpvar); /* Seconds*/
status = BitstreamPut1Bits(stream, 1); /* closed_gov */
status = BitstreamPut1Bits(stream, 0); /* broken_link */
/*temp =*/
BitstreamMpeg4ByteAlignStuffing(stream); /* Byte align GOV Header */
return status;
}
#ifdef ALLOW_VOP_NOT_CODED
PV_STATUS EncodeVopNotCoded(VideoEncData *video, UChar *bstream, Int *size, ULong modTime)
{
PV_STATUS status;
Vol *currVol = video->vol[0];
Vop *currVop = video->currVop;
BitstreamEncVideo *stream = currVol->stream;
UInt frameTick;
Int timeInc;
stream->bitstreamBuffer = bstream;
stream->bufferSize = *size;
BitstreamEncReset(stream);
status = BitstreamPutGT16Bits(stream, 32, VOP_START_CODE); /*Start Code for VOP*/
status = BitstreamPutBits(stream, 2, P_VOP);/* VOP Coding Type*/
frameTick = (Int)(((double)(modTime - video->modTimeRef) * currVol->timeIncrementResolution + 500) / 1000);
timeInc = frameTick - video->refTick[0];
while (timeInc >= currVol->timeIncrementResolution)
{
timeInc -= currVol->timeIncrementResolution;
status = BitstreamPut1Bits(stream, 1);
/* do not update refTick and modTimeRef yet, do it after encoding!! */
}
status = BitstreamPut1Bits(stream, 0);
status = BitstreamPut1Bits(stream, 1); /* marker bit */
status = BitstreamPutBits(stream, currVol->nbitsTimeIncRes, timeInc); /* vop_time_increment */
status = BitstreamPut1Bits(stream, 1); /* marker bit */
status = BitstreamPut1Bits(stream, 0); /* vop_coded bit */
BitstreamMpeg4ByteAlignStuffing(stream);
return status;
}
#endif
/* ======================================================================== */
/* Function : EncodeVOPHeader() */
/* Date : 08/23/2000 */
/* Purpose : Encode VOP Header */
/* In/out : */
/* Return : */
/* Modified : */
/* ======================================================================== */
PV_STATUS EncodeVOPHeader(BitstreamEncVideo *stream, Vol *currVol, Vop *currVop)
{
PV_STATUS status;
//int temp;
int MTB = currVol->moduloTimeBase;
/************************/
/* VideoObjectPlane() */
/************************/
status = BitstreamPutGT16Bits(stream, 32, VOP_START_CODE); /*Start Code for VOP*/
status = BitstreamPutBits(stream, 2, currVop->predictionType);/* VOP Coding Type*/
currVol->prevModuloTimeBase = currVol->moduloTimeBase;
while (MTB)
{
status = BitstreamPut1Bits(stream, 1);
MTB--;
}
status = BitstreamPut1Bits(stream, 0);
status = BitstreamPut1Bits(stream, 1); /* marker bit */
status = BitstreamPutBits(stream, currVol->nbitsTimeIncRes, currVop->timeInc); /* vop_time_increment */
status = BitstreamPut1Bits(stream, 1); /* marker bit */
status = BitstreamPut1Bits(stream, currVop->vopCoded); /* vop_coded bit */
if (currVop->vopCoded == 0)
{
/*temp =*/
BitstreamMpeg4ByteAlignStuffing(stream); /* Byte align VOP Header */
return status;
}
if (currVop->predictionType == P_VOP)
status = BitstreamPut1Bits(stream, currVop->roundingType); /* vop_rounding_type */
status = BitstreamPutBits(stream, 3, currVop->intraDCVlcThr); /* intra_dc_vlc_thr */
status = BitstreamPutBits(stream, 5, currVop->quantizer); /* vop_quant */
if (currVop->predictionType != I_VOP)
status = BitstreamPutBits(stream, 3, currVop->fcodeForward); /* vop_fcode_forward */
if (currVop->predictionType == B_VOP)
status = BitstreamPutBits(stream, 3, currVop->fcodeBackward);/* vop_fcode_backward */
if (currVol->scalability)
/* enhancement_type = 0 */
status = BitstreamPutBits(stream, 2, currVop->refSelectCode); /* ref_select_code */
return status;
}
#endif /* H263_ONLY */
/* ======================================================================== */
/* Function : EncodeShortHeader() */
/* Date : 08/23/2000 */
/* Purpose : Encode H.263 short video header */
/* In/out : */
/* Return : */
/* Modified : */
/* ======================================================================== */
PV_STATUS EncodeShortHeader(BitstreamEncVideo *stream, Vop *currVop)
{
PV_STATUS status;
status = BitstreamPutGT16Bits(stream, 22, SHORT_VIDEO_START_MARKER); /* Short_video_start_marker */
status = BitstreamPutBits(stream, 8, currVop->temporalRef); /* temporal_reference */
status = BitstreamPut1Bits(stream, 1); /* marker bit */
status = BitstreamPut1Bits(stream, 0); /* zero bit */
status = BitstreamPut1Bits(stream, 0); /* split_screen_indicator=0*/
status = BitstreamPut1Bits(stream, 0); /* document_camera_indicator=0*/
status = BitstreamPut1Bits(stream, 0); /* full_picture_freeze_release=0*/
switch (currVop->width)
{
case 128:
if (currVop->height == 96)
status = BitstreamPutBits(stream, 3, 1); /* source_format = 1 */
else
{
status = PV_FAIL;
return status;
}
break;
case 176:
if (currVop->height == 144)
status = BitstreamPutBits(stream, 3, 2); /* source_format = 2 */
else
{
status = PV_FAIL;
return status;
}
break;
case 352:
if (currVop->height == 288)
status = BitstreamPutBits(stream, 3, 3); /* source_format = 3 */
else
{
status = PV_FAIL;
return status;
}
break;
case 704:
if (currVop->height == 576)
status = BitstreamPutBits(stream, 3, 4); /* source_format = 4 */
else
{
status = PV_FAIL;
return status;
}
break;
case 1408:
if (currVop->height == 1152)
status = BitstreamPutBits(stream, 3, 5); /* source_format = 5 */
else
{
status = PV_FAIL;
return status;
}
break;
default:
status = PV_FAIL;
return status;
}
status = BitstreamPut1Bits(stream, currVop->predictionType); /* picture_coding type */
status = BitstreamPutBits(stream, 4, 0); /* four_reserved_zero_bits */
status = BitstreamPutBits(stream, 5, currVop->quantizer); /* vop_quant*/
status = BitstreamPut1Bits(stream, 0); /* zero_bit*/
status = BitstreamPut1Bits(stream, 0); /* pei=0 */
return status;
}
#ifndef H263_ONLY
/* ======================================================================== */
/* Function : EncodeVideoPacketHeader() */
/* Date : 09/05/2000 */
/* History : */
/* Purpose : Encode a video packet (resync) header. */
/* In/out : */
/* Return : */
/* Modified : 04/25/2002 */
/* Add bitstream structure as input argument */
/* */
/* ======================================================================== */
PV_STATUS EncodeVideoPacketHeader(VideoEncData *video, int MB_number,
int quant_scale, Int insert)
{
// PV_STATUS status=PV_SUCCESS;
int fcode;
Vop *currVop = video->currVop;
Vol *currVol = video->vol[video->currLayer];
BitstreamEncVideo *bs, tmp;
UChar buffer[30];
if (insert) /* insert packet header to the beginning of bs1 */
{
tmp.bitstreamBuffer = buffer; /* use temporary buffer */
tmp.bufferSize = 30;
BitstreamEncReset(&tmp);
bs = &tmp;
}
else
bs = video->bitstream1;
if (currVop->predictionType == I_VOP)
BitstreamPutGT16Bits(bs, 17, 1); /* resync_marker I_VOP */
else if (currVop->predictionType == P_VOP)
{
fcode = currVop->fcodeForward;
BitstreamPutGT16Bits(bs, 16 + fcode, 1); /* resync_marker P_VOP */
}
else
{
fcode = currVop->fcodeForward;
if (currVop->fcodeBackward > fcode)
fcode = currVop->fcodeBackward;
BitstreamPutGT16Bits(bs, 16 + fcode, 1); /* resync_marker B_VOP */
}
BitstreamPutBits(bs, currVol->nBitsForMBID, MB_number); /* resync_marker */
BitstreamPutBits(bs, 5, quant_scale); /* quant_scale */
BitstreamPut1Bits(bs, 0); /* header_extension_code = 0 */
if (0) /* header_extension_code = 1 */
{
/* NEED modulo_time_base code here ... default 0x01 below */
/*status =*/
BitstreamPut1Bits(bs, 1);
/*status = */
BitstreamPut1Bits(bs, 0);
/*status = */
BitstreamPut1Bits(bs, 1); /* marker bit */
/*status = */
BitstreamPutBits(bs, currVol->nbitsTimeIncRes, currVop->timeInc); /* vop_time_increment */
/*status = */
BitstreamPut1Bits(bs, 1); /* marker bit */
/*status = */
BitstreamPutBits(bs, 2, currVop->predictionType);/* VOP Coding Type*/
/*status = */
BitstreamPutBits(bs, 3, currVop->intraDCVlcThr); /* intra_dc_vlc_thr */
if (currVop->predictionType != I_VOP)
/*status = */ BitstreamPutBits(bs, 3, currVop->fcodeForward);
if (currVop->predictionType == B_VOP)
/*status = */ BitstreamPutBits(bs, 3, currVop->fcodeBackward);
}
#ifndef NO_SLICE_ENCODE
if (insert)
BitstreamPrependPacket(video->bitstream1, bs);
#endif
return PV_SUCCESS;
}
#endif /* H263_ONLY */
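EncodeShortHeader() only accepts the five standard H.263 picture sizes and fails with PV_FAIL otherwise. A compact restatement of the source_format mapping written by the switch above (the helper name is illustrative):

/* Editorial sketch of the source_format mapping used by EncodeShortHeader():
 * returns the 3-bit source_format for a supported size, or -1 otherwise. */
static int h263_source_format(int width, int height)
{
    if (width ==  128 && height ==   96) return 1;  /* sub-QCIF */
    if (width ==  176 && height ==  144) return 2;  /* QCIF     */
    if (width ==  352 && height ==  288) return 3;  /* CIF      */
    if (width ==  704 && height ==  576) return 4;  /* 4CIF     */
    if (width == 1408 && height == 1152) return 5;  /* 16CIF    */
    return -1;                                      /* unsupported -> PV_FAIL */
}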

View File

@ -0,0 +1,80 @@
/*
* Copyright (C) 2010 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef M4V_H263_ENCODER_H_
#define M4V_H263_ENCODER_H_
#include <media/stagefright/MediaBuffer.h>
#include <media/stagefright/MediaSource.h>
struct tagvideoEncControls;
struct tagvideoEncOptions;
namespace android {
struct MediaBuffer;
struct MediaBufferGroup;
struct M4vH263Encoder : public MediaSource,
public MediaBufferObserver {
M4vH263Encoder(const sp<MediaSource> &source,
const sp<MetaData>& meta);
virtual status_t start(MetaData *params);
virtual status_t stop();
virtual sp<MetaData> getFormat();
virtual status_t read(
MediaBuffer **buffer, const ReadOptions *options);
virtual void signalBufferReturned(MediaBuffer *buffer);
protected:
virtual ~M4vH263Encoder();
private:
sp<MediaSource> mSource;
sp<MetaData> mFormat;
sp<MetaData> mMeta;
int32_t mVideoWidth;
int32_t mVideoHeight;
int32_t mVideoFrameRate;
int32_t mVideoBitRate;
int32_t mVideoColorFormat;
int64_t mNumInputFrames;
int64_t mNextModTimeUs;
status_t mInitCheck;
bool mStarted;
tagvideoEncControls *mHandle;
tagvideoEncOptions *mEncParams;
MediaBuffer *mInputBuffer;
uint8_t *mInputFrameData;
MediaBufferGroup *mGroup;
status_t initCheck(const sp<MetaData>& meta);
void releaseOutputBuffers();
M4vH263Encoder(const M4vH263Encoder &);
M4vH263Encoder &operator=(const M4vH263Encoder &);
};
} // namespace android
#endif // M4V_H263_ENCODER_H_
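Like the other software encoders registered in this change, M4vH263Encoder is itself a MediaSource: a client pulls encoded access units with read() and returns each MediaBuffer with release(). A hedged sketch of that pull loop (inside namespace android; the frame source and MetaData setup are assumed to be provided by the caller, and error handling is kept minimal):

// Illustrative client loop only; not part of this change.
static status_t encodeAllFrames(const sp<MediaSource> &frameSource,
                                const sp<MetaData> &encMeta) {
    sp<MediaSource> encoder = new M4vH263Encoder(frameSource, encMeta);
    status_t err = encoder->start(NULL);
    if (err != OK) {
        return err;
    }
    MediaBuffer *buffer;
    while (encoder->read(&buffer, NULL) == OK) {
        // buffer->data() + buffer->range_offset() holds one encoded
        // MPEG-4 / H.263 access unit of buffer->range_length() bytes.
        buffer->release();
        buffer = NULL;
    }
    return encoder->stop();
}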