am f4619308: Merge "fix [2835280] Add support for cancelling buffers to ANativeWindow" into gingerbread

Merge commit 'f461930872beb4362998642c115691bdd2e94617' into gingerbread-plus-aosp

* commit 'f461930872beb4362998642c115691bdd2e94617':
  fix [2835280] Add support for cancelling buffers to ANativeWindow
@@ -114,8 +114,9 @@ public:
 
     int32_t identity; // surface's identity (const)
     int32_t token; // surface's token (for debugging)
-    int32_t reserved32[1];
     Statistics stats;
+    int8_t headBuf; // last retired buffer
+    uint8_t reservedBytes[3];
     int32_t reserved;
     BufferData buffers[NUM_BUFFER_MAX]; // 1024 bytes
 };
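
Note: the two new fields replace the dropped reserved32[1] word byte-for-byte (1 + 3 = 4 bytes), so the size and field offsets of the shared-memory SharedBufferStack stay the same for clients and SurfaceFlinger. A minimal standalone sketch of that equivalence; the wrapper structs here are hypothetical, only the field declarations mirror the diff:

    #include <cstdint>

    // Sketch only: the retired reserved word and its replacement occupy the
    // same four bytes, so the surrounding struct keeps its size and offsets.
    struct OldSlot { int32_t reserved32[1]; };
    struct NewSlot { int8_t headBuf; uint8_t reservedBytes[3]; };

    static_assert(sizeof(OldSlot) == sizeof(NewSlot),
                  "replacement fields must not change the shared struct size");
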
@@ -201,6 +202,7 @@ public:
     status_t undoDequeue(int buf);
 
     status_t lock(int buf);
+    status_t cancel(int buf);
     status_t queue(int buf);
     bool needNewBuffer(int buffer) const;
     status_t setDirtyRegion(int buffer, const Region& reg);
@@ -230,8 +232,9 @@ private:
         inline ssize_t operator()();
     };
 
-    struct UndoDequeueUpdate : public UpdateBase {
-        inline UndoDequeueUpdate(SharedBufferBase* sbb);
+    struct CancelUpdate : public UpdateBase {
+        int tail, buf;
+        inline CancelUpdate(SharedBufferBase* sbb, int tail, int buf);
         inline ssize_t operator()();
     };
 
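
Note: like the other *Update helpers in this class, CancelUpdate is a small functor: the constructor captures its parameters (which slot to give back, which buffer to put there) and operator()() applies them when updateCondition() runs it. A rough, self-contained sketch of that shape, using stand-in types rather than the real SharedBufferBase machinery (the actual implementation appears in the .cpp hunks below):

    #include <cstdint>

    // Stand-in for the slice of shared state the functor touches (hypothetical).
    struct StackSketch {
        int8_t  index[16];    // dequeue order of the buffer slots
        int32_t available;    // how many buffers the client may still dequeue
    };

    // Same shape as CancelUpdate in the diff: remember the target slot and the
    // buffer, then apply both in one step when the functor is invoked.
    struct CancelUpdateSketch {
        StackSketch& stack;
        int tail, buf;
        CancelUpdateSketch(StackSketch& s, int t, int b) : stack(s), tail(t), buf(b) {}
        int operator()() {
            stack.index[tail] = int8_t(buf);  // hand the cancelled buffer back
            stack.available += 1;             // it becomes dequeue-able again
            return 0;                         // NO_ERROR in the real code
        }
    };
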
@@ -256,7 +259,6 @@ private:
     int mNumBuffers;
 
     int32_t tail;
-    int32_t undoDequeueTail;
     int32_t queued_head;
     // statistics...
     nsecs_t mDequeueTime[SharedBufferStack::NUM_BUFFER_MAX];
@@ -199,6 +199,7 @@ private:
      */
     static int setSwapInterval(ANativeWindow* window, int interval);
     static int dequeueBuffer(ANativeWindow* window, android_native_buffer_t** buffer);
+    static int cancelBuffer(ANativeWindow* window, android_native_buffer_t* buffer);
     static int lockBuffer(ANativeWindow* window, android_native_buffer_t* buffer);
     static int queueBuffer(ANativeWindow* window, android_native_buffer_t* buffer);
     static int query(ANativeWindow* window, int what, int* value);
@@ -207,6 +208,7 @@ private:
     int dequeueBuffer(android_native_buffer_t** buffer);
     int lockBuffer(android_native_buffer_t* buffer);
     int queueBuffer(android_native_buffer_t* buffer);
+    int cancelBuffer(android_native_buffer_t* buffer);
     int query(int what, int* value);
     int perform(int operation, va_list args);
 
@@ -218,7 +218,17 @@ struct ANativeWindow
     int (*perform)(struct ANativeWindow* window,
             int operation, ... );
 
-    void* reserved_proc[3];
+    /*
+     * hook used to cancel a buffer that has been dequeued.
+     * No synchronization is performed between dequeue() and cancel(), so
+     * either external synchronization is needed, or these functions must be
+     * called from the same thread.
+     */
+    int (*cancelBuffer)(struct ANativeWindow* window,
+            struct android_native_buffer_t* buffer);
+
+
+    void* reserved_proc[2];
 };
 
 // Backwards compatibility... please switch to ANativeWindow.
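
Note: per the new comment block, cancelBuffer() hands a dequeued buffer back to the window without queueing it, and there is no built-in synchronization against dequeueBuffer(). A hedged usage sketch of the hook from producer code; it assumes the declarations from this header are in scope and an already-set-up ANativeWindow*, and trims real error handling:

    // Sketch: dequeue a buffer, then give it back unrendered. Both calls happen
    // on the same thread, as the header comment requires.
    static void skipFrame(ANativeWindow* win) {
        android_native_buffer_t* buf = 0;
        if (win->dequeueBuffer(win, &buf) != 0)
            return;                      // nothing to cancel if dequeue failed
        // ... decide this frame will not be drawn (e.g. nothing is dirty) ...
        win->cancelBuffer(win, buf);     // return the buffer, do not queue it
    }
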
@@ -285,10 +285,12 @@ ssize_t SharedBufferClient::DequeueUpdate::operator()() {
     return NO_ERROR;
 }
 
-SharedBufferClient::UndoDequeueUpdate::UndoDequeueUpdate(SharedBufferBase* sbb)
-    : UpdateBase(sbb) {
+SharedBufferClient::CancelUpdate::CancelUpdate(SharedBufferBase* sbb,
+        int tail, int buf)
+    : UpdateBase(sbb), tail(tail), buf(buf) {
 }
-ssize_t SharedBufferClient::UndoDequeueUpdate::operator()() {
+ssize_t SharedBufferClient::CancelUpdate::operator()() {
+    stack.index[tail] = buf;
     android_atomic_inc(&stack.available);
     return NO_ERROR;
 }
@@ -319,7 +321,7 @@ ssize_t SharedBufferServer::RetireUpdate::operator()() {
         return BAD_VALUE;
 
     // Preventively lock the current buffer before updating queued.
-    android_atomic_write(stack.index[head], &stack.inUse);
+    android_atomic_write(stack.headBuf, &stack.inUse);
 
     // Decrement the number of queued buffers
     int32_t queued;
@@ -334,7 +336,9 @@ ssize_t SharedBufferServer::RetireUpdate::operator()() {
     // the buffer we preventively locked upon entering this function
 
     head = (head + 1) % numBuffers;
-    android_atomic_write(stack.index[head], &stack.inUse);
+    const int8_t headBuf = stack.index[head];
+    stack.headBuf = headBuf;
+    android_atomic_write(headBuf, &stack.inUse);
 
     // head is only modified here, so we don't need to use cmpxchg
     android_atomic_write(head, &stack.head);
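
Note (reading between the lines of the diff): once cancel() can write a buffer index back into stack.index[], the server can no longer assume stack.index[head] still names the buffer it retired last, so RetireUpdate now records that buffer in the new headBuf field, and the preventive lock at the top of the function (previous hunk) reads stack.headBuf instead of re-reading the index array.
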
@@ -359,7 +363,7 @@ ssize_t SharedBufferServer::StatusUpdate::operator()() {
 SharedBufferClient::SharedBufferClient(SharedClient* sharedClient,
         int surface, int num, int32_t identity)
     : SharedBufferBase(sharedClient, surface, identity),
-        mNumBuffers(num), tail(0), undoDequeueTail(0)
+        mNumBuffers(num), tail(0)
 {
     SharedBufferStack& stack( *mSharedStack );
     tail = computeTail();
@@ -395,7 +399,6 @@ ssize_t SharedBufferClient::dequeue()
     DequeueUpdate update(this);
     updateCondition( update );
 
-    undoDequeueTail = tail;
     int dequeued = stack.index[tail];
     tail = ((tail+1 >= mNumBuffers) ? 0 : tail+1);
     LOGD_IF(DEBUG_ATOMICS, "dequeued=%d, tail++=%d, %s",
@@ -407,15 +410,20 @@ ssize_t SharedBufferClient::dequeue()
 }
 
 status_t SharedBufferClient::undoDequeue(int buf)
+{
+    return cancel(buf);
+}
+
+status_t SharedBufferClient::cancel(int buf)
 {
     RWLock::AutoRLock _rd(mLock);
 
-    // TODO: we can only undo the previous dequeue, we should
-    // enforce that in the api
-    UndoDequeueUpdate update(this);
+    // calculate the new position of the tail index (essentially tail--)
+    int localTail = (tail + mNumBuffers - 1) % mNumBuffers;
+    CancelUpdate update(this, localTail, buf);
     status_t err = updateCondition( update );
     if (err == NO_ERROR) {
-        tail = undoDequeueTail;
+        tail = localTail;
     }
     return err;
 }
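
Note: cancel() computes the slot to give back as (tail + mNumBuffers - 1) % mNumBuffers, i.e. the slot the preceding dequeue() just advanced past, with wrap-around at slot 0. A tiny standalone check of that arithmetic (names are stand-ins, not the real class):

    #include <cassert>

    // The slot immediately behind 'tail' in a ring of 'numBuffers' slots.
    static int previousSlot(int tail, int numBuffers) {
        return (tail + numBuffers - 1) % numBuffers;
    }

    int main() {
        assert(previousSlot(3, 4) == 2);  // ordinary case: one step back
        assert(previousSlot(0, 4) == 3);  // wrap-around: tail already at slot 0
        assert(previousSlot(1, 2) == 0);  // two-buffer configuration
        return 0;
    }
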
@@ -409,6 +409,7 @@ void Surface::init()
 {
     ANativeWindow::setSwapInterval = setSwapInterval;
     ANativeWindow::dequeueBuffer = dequeueBuffer;
+    ANativeWindow::cancelBuffer = cancelBuffer;
     ANativeWindow::lockBuffer = lockBuffer;
     ANativeWindow::queueBuffer = queueBuffer;
     ANativeWindow::query = query;
@@ -517,6 +518,12 @@ int Surface::dequeueBuffer(ANativeWindow* window,
     return self->dequeueBuffer(buffer);
 }
 
+int Surface::cancelBuffer(ANativeWindow* window,
+        android_native_buffer_t* buffer) {
+    Surface* self = getSelf(window);
+    return self->cancelBuffer(buffer);
+}
+
 int Surface::lockBuffer(ANativeWindow* window,
         android_native_buffer_t* buffer) {
     Surface* self = getSelf(window);
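
Note: the static cancelBuffer() added here is the same thin trampoline used by the other hooks: recover the C++ object from the C ANativeWindow pointer via getSelf() and forward to the member function. A generic sketch of that pattern with made-up type names (the real Surface uses its own getSelf() helper):

    // Sketch of the hook-to-member trampoline pattern, not the real Surface code.
    struct NativeWindowish {                       // stand-in for ANativeWindow
        int (*cancelBuffer)(NativeWindowish* w, void* buffer);
    };

    class SurfaceLike : public NativeWindowish {   // stand-in for Surface
    public:
        SurfaceLike() { cancelBuffer = hook_cancelBuffer; }
    private:
        static SurfaceLike* getSelf(NativeWindowish* w) {
            return static_cast<SurfaceLike*>(w);   // the hook only ever receives 'this'
        }
        static int hook_cancelBuffer(NativeWindowish* w, void* buffer) {
            return getSelf(w)->cancelBufferImpl(buffer);  // forward to the member
        }
        int cancelBufferImpl(void* /*buffer*/) { return 0; }  // would undo the dequeue
    };
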
@@ -617,6 +624,33 @@ int Surface::dequeueBuffer(android_native_buffer_t** buffer)
     return err;
 }
 
+int Surface::cancelBuffer(android_native_buffer_t* buffer)
+{
+    status_t err = validate();
+    switch (err) {
+    case NO_ERROR:
+        // no error, common case
+        break;
+    case INVALID_OPERATION:
+        // legitimate errors here
+        return err;
+    default:
+        // other errors happen because the surface is now invalid,
+        // for instance because it has been destroyed. In this case,
+        // we just fail silently (canceling a buffer is not technically
+        // an error at this point)
+        return NO_ERROR;
+    }
+
+    int32_t bufIdx = getBufferIndex(GraphicBuffer::getSelf(buffer));
+
+    err = mSharedBufferClient->cancel(bufIdx);
+
+    LOGE_IF(err, "error canceling buffer %d (%s)", bufIdx, strerror(-err));
+    return err;
+}
+
+
 int Surface::lockBuffer(android_native_buffer_t* buffer)
 {
     status_t err = validate();