/*
 * Copyright (C) 2010 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ANDROID_HWUI_TEXTURE_H
#define ANDROID_HWUI_TEXTURE_H

#include "GpuMemoryTracker.h"
#include "hwui/Bitmap.h"

#include <GLES2/gl2.h>

namespace android {
namespace uirenderer {

class Caches;
class UvMapper;
class Layer;

/**
 * Represents an OpenGL texture.
 */
class Texture : public GpuMemoryTracker {
public:
    explicit Texture(Caches& caches)
            : GpuMemoryTracker(GpuObjectType::Texture)
            , mCaches(caches)
    { }

    virtual ~Texture() { }

    inline void setWrap(GLenum wrap, bool bindTexture = false, bool force = false) {
        setWrapST(wrap, wrap, bindTexture, force);
    }

    virtual void setWrapST(GLenum wrapS, GLenum wrapT, bool bindTexture = false,
            bool force = false);

    inline void setFilter(GLenum filter, bool bindTexture = false, bool force = false) {
        setFilterMinMag(filter, filter, bindTexture, force);
    }

    virtual void setFilterMinMag(GLenum min, GLenum mag, bool bindTexture = false,
            bool force = false);
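
    // Illustrative usage sketch (not part of the original header): configuring a texture
    // for bilinear filtering with clamped texture coordinates. This assumes a valid GL
    // context and an already-created texture; passing bindTexture = true presumably lets
    // the setter bind the texture itself before touching the GL sampler state, and the
    // cached parameters below let redundant calls be skipped unless force is set.
    //
    //     texture.setFilter(GL_LINEAR, /* bindTexture */ true);
    //     texture.setWrap(GL_CLAMP_TO_EDGE, /* bindTexture */ true);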

    /**
     * Convenience method to call glDeleteTextures() on this texture's id.
     */
    void deleteTexture();

    /**
     * Sets the width, height, and format of the texture along with allocating
     * the texture ID. Does nothing if the width, height, and format are already
     * the requested values.
     *
     * The image data is undefined after calling this.
     */
    void resize(uint32_t width, uint32_t height, GLint internalFormat, GLint format) {
        upload(internalFormat, width, height, format, GL_UNSIGNED_BYTE, nullptr);
    }
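
    // Illustrative usage sketch (not part of the original header): allocating a scratch
    // texture, for example one that will be rendered into, assuming a Caches instance
    // named "caches". As noted above, the pixel contents are undefined until something
    // is uploaded or drawn into the texture.
    //
    //     Texture scratch(caches);
    //     scratch.resize(256, 256, GL_RGBA, GL_RGBA);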

    /**
     * Updates this Texture with the contents of the provided Bitmap,
     * also setting the appropriate width, height, and format. It is not necessary
     * to call resize() prior to this.
     *
     * Note this does not set the generation from the Bitmap.
     */
    void upload(Bitmap& source);
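
    // Illustrative usage sketch (not part of the original header): uploading a Bitmap and
    // then recording its generation by hand, since upload() deliberately leaves the
    // generation field untouched. The getGenerationID() accessor is an assumption
    // (inherited from Skia); the exact call may differ.
    //
    //     texture.upload(bitmap);
    //     texture.generation = bitmap.getGenerationID();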

    /**
     * Basically glTexImage2D/glTexSubImage2D.
     */
    void upload(GLint internalFormat, uint32_t width, uint32_t height,
            GLenum format, GLenum type, const void* pixels);

    /**
     * Wraps an existing texture.
     */
    void wrap(GLuint id, uint32_t width, uint32_t height, GLint internalFormat,
            GLint format, GLenum target);
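
    // Illustrative usage sketch (not part of the original header): adopting a texture
    // name created and filled elsewhere, so this Texture only tracks its metadata rather
    // than allocating storage itself. The dimensions and formats are placeholders.
    //
    //     GLuint name;
    //     glGenTextures(1, &name);
    //     // ... storage allocated and pixels uploaded by other code ...
    //     texture.wrap(name, 256, 256, GL_RGBA, GL_RGBA, GL_TEXTURE_2D);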

    GLuint id() const {
        return mId;
    }

    uint32_t width() const {
        return mWidth;
    }

    uint32_t height() const {
        return mHeight;
    }

    GLint format() const {
        return mFormat;
    }

    GLint internalFormat() const {
        return mInternalFormat;
    }

    GLenum target() const {
        return mTarget;
    }
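
    // Illustrative usage sketch (not part of the original header): callers that bind the
    // texture themselves would typically go through these accessors, since a wrapped
    // texture's target is not necessarily GL_TEXTURE_2D.
    //
    //     glBindTexture(texture.target(), texture.id());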

    /**
     * Generation of the backing bitmap.
     */
    uint32_t generation = 0;

    /**
     * Indicates whether the texture requires blending.
     */
    bool blend = false;

    /**
     * Indicates whether this texture should be cleaned up after use.
     */
    bool cleanup = false;

    /**
     * Optional, size of the original bitmap.
     */
    uint32_t bitmapSize = 0;

    /**
     * Indicates whether this texture will use trilinear filtering.
     */
    bool mipMap = false;

    /**
     * Optional, pointer to a texture coordinates mapper.
     */
    const UvMapper* uvMapper = nullptr;

    /**
     * Whether or not the Texture is marked in use and thus not evictable for
     * the current frame. This is reset at the start of a new frame.
     */
    void* isInUse = nullptr;

private:
    // TODO: Temporarily grant private access to Layer, remove once
    // Layer can be de-tangled from being a dual-purpose render target
    // and external texture wrapper
    friend class Layer;

    // Returns true if the size changed, false if it was the same
    bool updateSize(uint32_t width, uint32_t height, GLint internalFormat,
            GLint format, GLenum target);
    void resetCachedParams();

    GLuint mId = 0;
    uint32_t mWidth = 0;
    uint32_t mHeight = 0;
    GLint mFormat = 0;
    GLint mInternalFormat = 0;
    GLenum mTarget = GL_NONE;

    /* See GLES spec section 3.8.14
     * "In the initial state, the value assigned to TEXTURE_MIN_FILTER is
     * NEAREST_MIPMAP_LINEAR and the value for TEXTURE_MAG_FILTER is LINEAR.
     * s, t, and r wrap modes are all set to REPEAT."
     */
    GLenum mWrapS = GL_REPEAT;
    GLenum mWrapT = GL_REPEAT;
    GLenum mMinFilter = GL_NEAREST_MIPMAP_LINEAR;
    GLenum mMagFilter = GL_LINEAR;

    Caches& mCaches;
}; // class Texture

class AutoTexture {
public:
    explicit AutoTexture(Texture* texture)
            : texture(texture) {}
    ~AutoTexture() {
        if (texture && texture->cleanup) {
            texture->deleteTexture();
            delete texture;
        }
    }

    Texture* const texture;
}; // class AutoTexture
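
// Illustrative usage sketch (not part of the original header): AutoTexture provides
// scope-bound cleanup for textures whose cleanup flag is set, e.g. a one-off texture
// that should not outlive a single draw.
//
//     AutoTexture scoped(texture);   // texture is a Texture* obtained elsewhere
//     // ... draw using scoped.texture ...
//     // on scope exit, the destructor deletes the texture if texture->cleanup is true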

}; // namespace uirenderer
}; // namespace android

#endif // ANDROID_HWUI_TEXTURE_H