typedef struct native_handle {
    int version;        /* sizeof(native_handle_t) */
    int numFds;         /* number of file-descriptors at &data[0] */
    int numInts;        /* number of ints at &data[numFds] */
    ...
    int data[0];        /* numFds + numInts ints */
    ...
} native_handle_t;
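Handles of this shape are normally built with the libcutils helpers rather than by hand: fds occupy data[0..numFds), and the shared ints follow immediately after. A minimal sketch (the fd and the two metadata ints are placeholders):

#include <unistd.h>
#include <cutils/native_handle.h>

// Minimal sketch: wrap one buffer fd plus two metadata ints in a handle.
// A real producer would get the fd from its allocator (e.g. ION).
native_handle_t* wrap_buffer(int buffer_fd, int width, int height) {
    native_handle_t* h = native_handle_create(1 /* numFds */, 2 /* numInts */);
    if (!h) return nullptr;
    h->data[0] = buffer_fd;   // fds live at &data[0]
    h->data[1] = width;       // ints follow at &data[numFds]
    h->data[2] = height;
    return h;
}

// Cleanup: close the contained fds, then free the handle itself.
void destroy_buffer_handle(native_handle_t* h) {
    native_handle_close(h);   // closes data[0..numFds)
    native_handle_delete(h);  // frees the struct
}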
// numbers of fds/ints in native_handle_t to flatten
uint32_t mTransportNumFds;
uint32_t mTransportNumInts;

uint64_t mId;

// Stores the generation number of this buffer. If this number does not
// match the BufferQueue's internal generation number (set through
// IGBP::setGenerationNumber), attempts to attach the buffer will fail.
uint32_t mGenerationNumber;
};
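The generation number matters when a producer re-attaches buffers after IGBP::setGenerationNumber has been called. A short sketch of the failure it guards against (the slot and buffer names are illustrative):

// Sketch: after the queue's generation is bumped, stale buffers are
// rejected on attach.
producer->setGenerationNumber(2);      // queue now expects generation 2

int slot = -1;
// staleBuffer still carries mGenerationNumber == 1 from before the bump,
// so BufferQueueProducer::attachBuffer() returns BAD_VALUE.
status_t err = producer->attachBuffer(&slot, staleBuffer);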
if (returnFlags & BUFFER_NEEDS_REALLOCATION) {
    BQ_LOGV("dequeueBuffer: allocating a new buffer for slot %d", *outSlot);
    sp<GraphicBuffer> graphicBuffer = new GraphicBuffer(
            width, height, format, BQ_LAYER_COUNT, usage,
            {mConsumerName.string(), mConsumerName.size()});
// make sure to not allocate a N x 0 or 0 x N buffer, since this is
// allowed from an API stand-point allocate a 1x1 buffer instead.
if (!width || !height)
    width = height = 1;
// Ensure that layerCount is valid.
if (layerCount < 1)
    layerCount = 1;
/*
 * The returned buffers are already imported and must not be imported
 * again. outBufferHandles must point to a space that can contain at
 * least "count" buffer_handle_t.
 */
Error allocate(BufferDescriptor descriptor, uint32_t count,
               uint32_t* outStride, buffer_handle_t* outBufferHandles) const;
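This wrapper sits above the allocator/mapper HIDL pair. A rough sketch of the underlying 2.0 flow, with error handling elided and example descriptor values; step 3 is why the wrapper's callers must not import again:

#include <android/hardware/graphics/allocator/2.0/IAllocator.h>
#include <android/hardware/graphics/common/1.0/types.h>
#include <android/hardware/graphics/mapper/2.0/IMapper.h>
#include <cutils/native_handle.h>

using android::sp;
using android::hardware::hidl_handle;
using android::hardware::hidl_vec;
using android::hardware::graphics::allocator::V2_0::IAllocator;
using android::hardware::graphics::common::V1_0::BufferUsage;
using android::hardware::graphics::common::V1_0::PixelFormat;
using android::hardware::graphics::mapper::V2_0::BufferDescriptor;
using android::hardware::graphics::mapper::V2_0::Error;
using android::hardware::graphics::mapper::V2_0::IMapper;

void allocateOneBuffer(const sp<IAllocator>& allocator,
                       const sp<IMapper>& mapper) {
    // 1. Have the mapper encode the properties into an opaque descriptor.
    IMapper::BufferDescriptorInfo info{};
    info.width = 64;
    info.height = 64;
    info.layerCount = 1;
    info.format = PixelFormat::RGBA_8888;
    info.usage = static_cast<uint64_t>(BufferUsage::CPU_READ_OFTEN);

    BufferDescriptor descriptor;
    mapper->createDescriptor(info,
            [&](Error /* error */, const BufferDescriptor& d) {
        descriptor = d;
    });

    // 2. Allocate: the raw handles returned here are NOT yet imported.
    buffer_handle_t imported = nullptr;
    allocator->allocate(descriptor, 1,
            [&](Error /* error */, uint32_t /* stride */,
                const hidl_vec<hidl_handle>& buffers) {
        // 3. Import the raw handle into this process. libui's wrapper does
        //    this itself, which is why its callers must not import again.
        mapper->importBuffer(buffers[0],
                [&](Error /* error */, void* buffer) {
            imported = static_cast<buffer_handle_t>(buffer);
        });
    });
}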
IAllocator* HIDL_FETCH_IAllocator(const char* /* name */) {
    const hw_module_t* module = nullptr;
    int err = hw_get_module(GRALLOC_HARDWARE_MODULE_ID, &module);
    if (err) {
        ALOGE("failed to get gralloc module");
        return nullptr;
    }

    uint8_t major = (module->module_api_version >> 8) & 0xff;
    switch (major) {
        case 1:
            return new Gralloc1Allocator(module);
        case 0:
            return new Gralloc0Allocator(module);
        default:
            ALOGE("unknown gralloc module major version %d", major);
            return nullptr;
    }
}
typedef struct gralloc1_device {
    /* Must be the first member of this struct, since a pointer to this struct
     * will be generated by casting from a hw_device_t* */
    struct hw_device_t common;
typedef struct alloc_device_t {
    struct hw_device_t common;

    int (*alloc)(struct alloc_device_t* dev,
                 int w, int h, int format, int usage,
                 buffer_handle_t* handle, int* stride);

    int (*free)(struct alloc_device_t* dev, buffer_handle_t handle);

    void (*dump)(struct alloc_device_t* dev, char* buff, int buff_len);
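For contrast with the gralloc1 path below, a minimal sketch of driving this gralloc0 device directly; the dimensions, format, and usage flags are just examples:

#include <hardware/gralloc.h>
#include <hardware/hardware.h>

// Sketch: open the gralloc0 device, allocate one buffer, free it, close.
int demo_gralloc0(void) {
    const hw_module_t* module = nullptr;
    if (hw_get_module(GRALLOC_HARDWARE_MODULE_ID, &module) != 0)
        return -1;

    alloc_device_t* dev = nullptr;
    if (gralloc_open(module, &dev) != 0)   // wraps module->methods->open
        return -1;

    buffer_handle_t handle;
    int stride = 0;
    int err = dev->alloc(dev, 640, 480, HAL_PIXEL_FORMAT_RGBA_8888,
                         GRALLOC_USAGE_SW_READ_OFTEN, &handle, &stride);
    if (err == 0)
        dev->free(dev, handle);

    gralloc_close(dev);
    return err;
}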
Gralloc1Allocator::Gralloc1Allocator(const hw_module_t* module)
    : mDevice(nullptr), mCapabilities(), mDispatch() {
    int result = gralloc1_open(module, &mDevice);
    if (result) {
        LOG_ALWAYS_FATAL("failed to open gralloc1 device: %s",
                         strerror(-result));
    }
template <typename T>
void Gralloc1Allocator::initDispatch(gralloc1_function_descriptor_t desc,
                                     T* outPfn) {
    auto pfn = mDevice->getFunction(mDevice, desc);
    if (!pfn) {
        LOG_ALWAYS_FATAL("failed to get gralloc1 function %d", desc);
    }
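After the check, the function stores the pointer into *outPfn. The constructor then fills its dispatch table with calls along these lines (the mDispatch member names are assumptions here, not quoted from the source):

// Hedged sketch of the call sites; one initDispatch() per hooked entry point.
initDispatch(GRALLOC1_FUNCTION_CREATE_DESCRIPTOR, &mDispatch.createDescriptor);
initDispatch(GRALLOC1_FUNCTION_DESTROY_DESCRIPTOR, &mDispatch.destroyDescriptor);
initDispatch(GRALLOC1_FUNCTION_ALLOCATE, &mDispatch.allocate);
initDispatch(GRALLOC1_FUNCTION_RELEASE, &mDispatch.release);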
Mapper::Mapper() {
    mMapper = IMapper::getService();
    if (mMapper == nullptr) {
        LOG_ALWAYS_FATAL("gralloc-mapper is missing");
    }
    if (mMapper->isRemote()) {
        LOG_ALWAYS_FATAL("gralloc-mapper must be in passthrough mode");
    }

    // IMapper 2.1 is optional
    mMapperV2_1 = hardware::graphics::mapper::V2_1::IMapper::castFrom(mMapper);
}
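castFrom returns nullptr when the passthrough implementation only provides 2.0, so 2.1-only calls must be guarded. A sketch, loosely modeled on how libui's Mapper gates the optional interface:

// Sketch: fall back to the raw handle counts when 2.1 is unavailable.
void Mapper::getTransportSize(buffer_handle_t bufferHandle,
                              uint32_t* outNumFds, uint32_t* outNumInts) const {
    if (mMapperV2_1 != nullptr) {
        void* buffer = const_cast<native_handle_t*>(bufferHandle);
        mMapperV2_1->getTransportSize(buffer,
                [&](auto /* error */, uint32_t numFds, uint32_t numInts) {
            *outNumFds = numFds;
            *outNumInts = numInts;
        });
    } else {
        // 2.0 has no getTransportSize; transport everything in the handle.
        *outNumFds = static_cast<uint32_t>(bufferHandle->numFds);
        *outNumInts = static_cast<uint32_t>(bufferHandle->numInts);
    }
}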
/**
 * A buffer descriptor is an implementation-defined opaque data returned by
 * createDescriptor. It describes the properties of a buffer and is consumed
 * by the allocator.
 */
typedef vec<uint32_t> BufferDescriptor;
/**
 * Structure for describing YCbCr formats for consumption by applications.
 * This is used with PixelFormat::YCBCR_*_888.
 *
 * Buffer chroma subsampling is defined in the format.
 * e.g. PixelFormat::YCBCR_420_888 has subsampling 4:2:0.
 *
 * Buffers must have a 8 bit depth.
 *
 * y, cb, and cr point to the first byte of their respective planes.
 *
 * Stride describes the distance in bytes from the first value of one row of
 * the image to the first value of the next row. It includes the width of the
 * image plus padding.
 * yStride is the stride of the luma plane.
 * cStride is the stride of the chroma planes.
 *
 * chromaStep is the distance in bytes from one chroma pixel value to the
 * next. This is 2 bytes for semiplanar (because chroma values are interleaved
 * and each chroma value is one byte) and 1 for planar.
 */
struct YCbCrLayout {
    pointer y;
    pointer cb;
    pointer cr;
    uint32_t yStride;
    uint32_t cStride;
    uint32_t chromaStep;
};
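Putting those fields together, the byte addresses of the samples covering pixel (x, y) in a 4:2:0 buffer work out as in this sketch; a local stand-in struct with void* pointers is used so the snippet is self-contained:

#include <cstdint>

// Stand-in for the HIDL YCbCrLayout above (pointer maps to void* in C++).
struct HostYCbCrLayout {
    void* y; void* cb; void* cr;
    uint32_t yStride, cStride, chromaStep;
};

uint8_t* lumaAt(const HostYCbCrLayout& l, uint32_t x, uint32_t y) {
    return static_cast<uint8_t*>(l.y) + y * l.yStride + x;
}

uint8_t* cbAt(const HostYCbCrLayout& l, uint32_t x, uint32_t y) {
    // Chroma is subsampled 2x2 in 4:2:0, hence x/2 and y/2. chromaStep is
    // 2 for semiplanar (interleaved CbCr) and 1 for fully planar layouts.
    return static_cast<uint8_t*>(l.cb)
            + (y / 2) * l.cStride + (x / 2) * l.chromaStep;
}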
/**
 * Get the transport size of a buffer. An imported buffer handle is a raw
 * buffer handle with the process-local runtime data appended. This
 * function, for example, allows a caller to omit the process-local
 * runtime data at the tail when serializing the imported buffer handle.
 *
 * Note that a client might or might not omit the process-local runtime
 * data when sending an imported buffer handle. The mapper must support
 * both cases on the receiving end.
 *
 * @param buffer is the buffer to get the transport size from.
 * @return error is NONE upon success. Otherwise,
 *                  BAD_BUFFER when the buffer is invalid.
 * @return numFds is the number of file descriptors needed for transport.
 * @return numInts is the number of integers needed for transport.
 */
getTransportSize(pointer buffer)
        generates (Error error,
                   uint32_t numFds,
                   uint32_t numInts);
};
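On the sending side this enables trimming like the following sketch, where sendFd and sendInt are hypothetical serializer helpers standing in for whatever IPC writer is in use:

// Hedged sketch: serialize only the transport-relevant prefix of an
// imported handle, dropping the process-local runtime data at the tail.
void flattenForTransport(const native_handle_t* h,
                         uint32_t numFds, uint32_t numInts) {
    // numFds/numInts come from getTransportSize(); they are at most the
    // counts stored in the imported handle itself.
    for (uint32_t i = 0; i < numFds; i++)
        sendFd(h->data[i]);                 // fds first, at &data[0]
    for (uint32_t i = 0; i < numInts; i++)
        sendInt(h->data[h->numFds + i]);    // then the shared ints
}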
IMapper* HIDL_FETCH_IMapper(const char* /* name */) {
    const hw_module_t* module = nullptr;
    int err = hw_get_module(GRALLOC_HARDWARE_MODULE_ID, &module);
    if (err) {
        ALOGE("failed to get gralloc module");
        return nullptr;
    }

    uint8_t major = (module->module_api_version >> 8) & 0xff;
    switch (major) {
        case 1:
            return new Gralloc1Mapper(module);
        case 0:
            return new Gralloc0Mapper(module);
        default:
            ALOGE("unknown gralloc module major version %d", major);
            return nullptr;
    }
}
Gralloc1Mapper::Gralloc1Mapper(const hw_module_t* module)
    : mDevice(nullptr), mDispatch() {
    int result = gralloc1_open(module, &mDevice);
    if (result) {
        LOG_ALWAYS_FATAL("failed to open gralloc1 device: %s",
                         strerror(-result));
    }
template <typename T>
void Gralloc1Mapper::initDispatch(gralloc1_function_descriptor_t desc,
                                  T* outPfn) {
    auto pfn = mDevice->getFunction(mDevice, desc);
    if (!pfn) {
        LOG_ALWAYS_FATAL("failed to get gralloc1 function %d", desc);
    }
/* If this capability is supported, then the outBuffers parameter to
 * allocate may be NULL, which instructs the device to report whether the
 * given allocation is possible or not. */
GRALLOC1_CAPABILITY_TEST_ALLOCATE = 1,

/* If this capability is supported, then the implementation supports
 * allocating buffers with more than one image layer. */
GRALLOC1_CAPABILITY_LAYERED_BUFFERS = 2,

/* If this capability is supported, then the implementation always closes
 * and deletes a buffer handle whenever the last reference is removed.
 *
 * Supporting this capability is strongly recommended. It will become
 * mandatory in future releases. */
GRALLOC1_CAPABILITY_RELEASE_IMPLY_DELETE = 3,
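A client can discover which of these a device implements through getCapabilities; a small sketch:

#include <vector>
#include <hardware/gralloc1.h>

// Sketch: probe a gralloc1 device for the layered-buffers capability.
bool supportsLayeredBuffers(gralloc1_device_t* device) {
    uint32_t count = 0;
    device->getCapabilities(device, &count, nullptr);  // query count only

    std::vector<int32_t> caps(count);
    device->getCapabilities(device, &count, caps.data());

    for (int32_t cap : caps) {
        if (cap == GRALLOC1_CAPABILITY_LAYERED_BUFFERS)
            return true;
    }
    return false;
}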
gralloc1_function_pointer_t GrallocImpl::GetFunction(gralloc1_device_t *device,
                                                     int32_t function) {
    if (!device) {
        return NULL;
    }

    switch (function) {
        case GRALLOC1_FUNCTION_DUMP:
            return reinterpret_cast<gralloc1_function_pointer_t>(Dump);
        case GRALLOC1_FUNCTION_CREATE_DESCRIPTOR:
            return reinterpret_cast<gralloc1_function_pointer_t>(CreateBufferDescriptor);
        case GRALLOC1_FUNCTION_DESTROY_DESCRIPTOR:
            return reinterpret_cast<gralloc1_function_pointer_t>(DestroyBufferDescriptor);
        case GRALLOC1_FUNCTION_SET_CONSUMER_USAGE:
            return reinterpret_cast<gralloc1_function_pointer_t>(SetConsumerUsage);
        case GRALLOC1_FUNCTION_SET_DIMENSIONS:
            return reinterpret_cast<gralloc1_function_pointer_t>(SetBufferDimensions);
        case GRALLOC1_FUNCTION_SET_FORMAT:
            return reinterpret_cast<gralloc1_function_pointer_t>(SetColorFormat);
        case GRALLOC1_FUNCTION_SET_LAYER_COUNT:
            return reinterpret_cast<gralloc1_function_pointer_t>(SetLayerCount);
        case GRALLOC1_FUNCTION_SET_PRODUCER_USAGE:
            return reinterpret_cast<gralloc1_function_pointer_t>(SetProducerUsage);
        case GRALLOC1_FUNCTION_GET_BACKING_STORE:
            return reinterpret_cast<gralloc1_function_pointer_t>(GetBackingStore);
        case GRALLOC1_FUNCTION_GET_CONSUMER_USAGE:
            return reinterpret_cast<gralloc1_function_pointer_t>(GetConsumerUsage);
        case GRALLOC1_FUNCTION_GET_DIMENSIONS:
            return reinterpret_cast<gralloc1_function_pointer_t>(GetBufferDimensions);
        case GRALLOC1_FUNCTION_GET_FORMAT:
            return reinterpret_cast<gralloc1_function_pointer_t>(GetColorFormat);
        case GRALLOC1_FUNCTION_GET_LAYER_COUNT:
            return reinterpret_cast<gralloc1_function_pointer_t>(GetLayerCount);
        case GRALLOC1_FUNCTION_GET_PRODUCER_USAGE:
            return reinterpret_cast<gralloc1_function_pointer_t>(GetProducerUsage);
        case GRALLOC1_FUNCTION_GET_STRIDE:
            return reinterpret_cast<gralloc1_function_pointer_t>(GetBufferStride);
        case GRALLOC1_FUNCTION_ALLOCATE:
            return reinterpret_cast<gralloc1_function_pointer_t>(AllocateBuffers);
        case GRALLOC1_FUNCTION_RETAIN:
            return reinterpret_cast<gralloc1_function_pointer_t>(RetainBuffer);
        case GRALLOC1_FUNCTION_RELEASE:
            return reinterpret_cast<gralloc1_function_pointer_t>(ReleaseBuffer);
        case GRALLOC1_FUNCTION_GET_NUM_FLEX_PLANES:
            return reinterpret_cast<gralloc1_function_pointer_t>(GetNumFlexPlanes);
        case GRALLOC1_FUNCTION_LOCK:
            return reinterpret_cast<gralloc1_function_pointer_t>(LockBuffer);
        case GRALLOC1_FUNCTION_LOCK_FLEX:
            return reinterpret_cast<gralloc1_function_pointer_t>(LockFlex);
        case GRALLOC1_FUNCTION_UNLOCK:
            return reinterpret_cast<gralloc1_function_pointer_t>(UnlockBuffer);
        case GRALLOC1_FUNCTION_PERFORM:
            return reinterpret_cast<gralloc1_function_pointer_t>(Gralloc1Perform);
        default:
            ALOGE("%s:Gralloc Error. Client Requested for unsupported function",
                  __FUNCTION__);
            return NULL;
    }
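On the client side, these descriptors are resolved once and cached as typed function pointers, roughly like this:

#include <hardware/gralloc1.h>

// Sketch: pull entry points out of getFunction and cast them to the
// matching GRALLOC1_PFN_* typedefs from gralloc1.h.
GRALLOC1_PFN_ALLOCATE pfnAllocate = nullptr;
GRALLOC1_PFN_LOCK pfnLock = nullptr;

void loadEntryPoints(gralloc1_device_t* device) {
    pfnAllocate = reinterpret_cast<GRALLOC1_PFN_ALLOCATE>(
            device->getFunction(device, GRALLOC1_FUNCTION_ALLOCATE));
    pfnLock = reinterpret_cast<GRALLOC1_PFN_LOCK>(
            device->getFunction(device, GRALLOC1_FUNCTION_LOCK));
    // A null return means the descriptor is unsupported (the default
    // branch in GetFunction above).
}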
enum ion_heap_type {
    ION_HEAP_TYPE_SYSTEM,
    ION_HEAP_TYPE_SYSTEM_CONTIG,
    ION_HEAP_TYPE_CARVEOUT,
    ION_HEAP_TYPE_CHUNK,
    ION_HEAP_TYPE_DMA,
    ION_HEAP_TYPE_CUSTOM, /* must be last so device specific heaps always
                             are at the end of this enum */
    ION_NUM_HEAPS = 16,
};
#define ION_FLAG_CACHED 1            /* mappings of this buffer should be
                                        cached, ion will do cache maintenance
                                        when the buffer is mapped for dma */
#define ION_FLAG_CACHED_NEEDS_SYNC 2 /* mappings of this buffer will be
                                        created at mmap time, if this is set
                                        caches must be managed manually */
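For orientation, this is how the heap types and flags come together in a userspace allocation against the legacy (pre-4.12) ION uapi. Note that heap_id_mask selects by heap id, not type; using the type value as the mask bit, as below, only works where the default heaps keep id == type. A sketch:

#include <fcntl.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/ion.h>   /* legacy ION uapi; header path varies by kernel */

/* Sketch: allocate a cached buffer from the system heap, export it as a
 * shareable fd, and drop the local handle. */
int ion_alloc_demo(size_t len) {
    int ion_fd = open("/dev/ion", O_RDONLY);
    if (ion_fd < 0) return -1;

    struct ion_allocation_data alloc = {
        .len = len,
        .align = 0,
        .heap_id_mask = 1 << ION_HEAP_TYPE_SYSTEM,
        .flags = ION_FLAG_CACHED,
    };
    if (ioctl(ion_fd, ION_IOC_ALLOC, &alloc) < 0) {
        close(ion_fd);
        return -1;
    }

    struct ion_fd_data share = { .handle = alloc.handle };
    ioctl(ion_fd, ION_IOC_SHARE, &share);   /* share.fd is now a dma-buf fd */

    struct ion_handle_data free_data = { .handle = alloc.handle };
    ioctl(ion_fd, ION_IOC_FREE, &free_data);
    close(ion_fd);
    return share.fd;
}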
void Allocator::GetIonHeapInfo(gralloc1_producer_usage_t prod_usage,
                               gralloc1_consumer_usage_t cons_usage,
                               unsigned int *ion_heap_id,
                               unsigned int *alloc_type,
                               unsigned int *ion_flags) {
    unsigned int heap_id = 0;
    unsigned int type = 0;
    uint32_t flags = 0;
    if (prod_usage & GRALLOC1_PRODUCER_USAGE_PROTECTED) {
        if (cons_usage & GRALLOC1_CONSUMER_USAGE_PRIVATE_SECURE_DISPLAY) {
            heap_id = ION_HEAP(SD_HEAP_ID);
            /*
             * There is currently no flag in ION for Secure Display
             * VM. Please add it to the define once available.
             */
            flags |= UINT(ION_SD_FLAGS);
        } else if (prod_usage & GRALLOC1_PRODUCER_USAGE_CAMERA) {
            heap_id = ION_HEAP(SD_HEAP_ID);
            if (cons_usage & GRALLOC1_CONSUMER_USAGE_HWCOMPOSER) {
                flags |= UINT(ION_SC_PREVIEW_FLAGS);
            } else {
                flags |= UINT(ION_SC_FLAGS);
            }
        } else {
            heap_id = ION_HEAP(CP_HEAP_ID);
            flags |= UINT(ION_CP_FLAGS);
        }
    } else if (prod_usage & GRALLOC1_PRODUCER_USAGE_PRIVATE_MM_HEAP) {
        // MM Heap is exclusively a secure heap.
        // If it is used for non secure cases, fallback to IOMMU heap
        ALOGW("MM_HEAP cannot be used as an insecure heap. Using system heap instead!!");
        heap_id |= ION_HEAP(ION_SYSTEM_HEAP_ID);
    }

    if (prod_usage & GRALLOC1_PRODUCER_USAGE_PRIVATE_CAMERA_HEAP) {
        heap_id |= ION_HEAP(ION_CAMERA_HEAP_ID);
    }
/**
 * struct ion_platform_heap - defines a heap in the given platform
 * @type:    type of the heap from ion_heap_type enum
 * @id:      unique identifier for heap. When allocating higher numbers
 *           will be allocated from first. At allocation these are passed
 *           as a bit mask and therefore can not exceed ION_NUM_HEAP_IDS.
 * @name:    used for debug purposes
 * @base:    base address of heap in physical memory if applicable
 * @size:    size of the heap in bytes if applicable
 * @has_outer_cache: set to 1 if outer cache is used, 0 otherwise.
 * @extra_data: Extra data specific to each heap type
 * @align:   required alignment in physical memory if applicable
 * @priv:    private info passed from the board file
 *
 * Provided by the board file.
 */
struct ion_platform_heap {
    enum ion_heap_type type;
    unsigned int id;
    const char *name;
    ion_phys_addr_t base;
    size_t size;
    unsigned int has_outer_cache;
    void *extra_data;
    ion_phys_addr_t align;
    void *priv;
};
type is the ion-heap-type value from the dts with the ION_HEAP_TYPE_ prefix prepended.
id is unique; it is the value of the reg property in the dts.
base is the starting physical address of the heap.
name is the heap's name, used mainly for debugging; it is defined in ion_heap_meta, keyed by the corresponding id.
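A board-file table populating the struct might look like the following; the ids, names, and addresses are purely illustrative, since the real values come from the platform dts as described above:

/* Hedged illustration of a board-file heap table. */
static struct ion_platform_heap example_ion_heaps[] = {
    {
        .type = ION_HEAP_TYPE_SYSTEM,
        .id   = 25,                      /* the dts reg value */
        .name = "system",                /* debug name */
    },
    {
        .type = ION_HEAP_TYPE_CARVEOUT,
        .id   = 8,
        .name = "camera_carveout",
        .base = 0x90000000,              /* physical start address */
        .size = 0x01000000,              /* 16 MiB */
    },
};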
if (!heap->ops->allocate || !heap->ops->free || !heap->ops->map_dma ||
    !heap->ops->unmap_dma)
    pr_err("%s: can not add heap with invalid ops struct.\n", __func__);

if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
    ion_heap_init_deferred_free(heap);

if ((heap->flags & ION_HEAP_FLAG_DEFER_FREE) || heap->ops->shrink)
    ion_heap_init_shrinker(heap);

heap->dev = dev;
down_write(&dev->lock);
/*
 * use negative heap->id to reverse the priority -- when traversing
 * the list later attempt higher id numbers first
 */
plist_node_init(&heap->node, -heap->id);
plist_add(&heap->node, &dev->heaps);
debug_file = debugfs_create_file(heap->name, 0664, dev->heaps_debug_root,
                                 heap, &debug_heap_fops);
struct ion_cma_secure_heap {
    struct device *dev;
    /*
     * Protects against races between threads allocating memory/adding to
     * pool at the same time. (e.g. thread 1 adds to pool, thread 2
     * allocates thread 1's memory before thread 1 knows it needs to
     * allocate more.
     * Admittedly this is fairly coarse grained right now but the chance for
     * contention on this lock is unlikely right now. This can be changed if
     * this ever changes in the future
     */
    struct mutex alloc_lock;
    /*
     * protects the list of memory chunks in this pool
     */
    struct mutex chunk_lock;
    struct ion_heap heap;
    /*
     * Bitmap for allocation. This contains the aggregate of all chunks.
     */
    unsigned long *bitmap;
    /*
     * List of all allocated chunks
     *
     * This is where things get 'clever'. Individual allocations from
     * dma_alloc_coherent must be allocated and freed in one chunk.
     * We don't just want to limit the allocations to those confined
     * within a single chunk (if clients allocate n small chunks we would
     * never be able to use the combined size). The bitmap allocator is
     * used to find the contiguous region and the parts of the chunks are
     * marked off as used. The chunks won't be freed in the shrinker until
     * the usage is actually zero.
     */
    struct list_head chunks;
    int npages;
    ion_phys_addr_t base;
    struct work_struct work;
    unsigned long last_alloc;
    struct shrinker shrinker;
    atomic_t total_allocated;
    atomic_t total_pool_size;
    atomic_t total_leaked;
    unsigned long heap_size;
    unsigned long default_prefetch_size;
};
uint32_t Layer::getEffectiveUsage(uint32_t usage) const {
    // TODO: should we do something special if mSecure is set?
    if (mProtectedByApp) {
        // need a hardware-protected path to external video sink
        usage |= GraphicBuffer::USAGE_PROTECTED;
    }
    if (mPotentialCursor) {
        usage |= GraphicBuffer::USAGE_CURSOR;
    }
    usage |= GraphicBuffer::USAGE_HW_COMPOSER;
    return usage;
}
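So a client's requested usage only ever gains bits here; for example (a sketch, with layer standing in for some Layer instance):

// Sketch: the effective usage is the client's request plus policy bits.
uint32_t requested = GraphicBuffer::USAGE_HW_TEXTURE |
                     GraphicBuffer::USAGE_SW_WRITE_OFTEN;
uint32_t effective = layer->getEffectiveUsage(requested);
// effective == requested | USAGE_HW_COMPOSER, plus USAGE_PROTECTED when
// mProtectedByApp is set and USAGE_CURSOR when mPotentialCursor is set.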
...
/sys/kernel/debug/ion/heaps # cat system
          client              pid             size
----------------------------------------------------
----------------------------------------------------
orphaned allocations (info is from last known client):
          client              pid             user         user_pid             size   mcnt   rcnt
 allocator@2.0-s              257  composer@2.1-se             258          7372800      0      1
 allocator@2.0-s              257  composer@2.1-se             258           139264      0      1
 allocator@2.0-s              257  composer@2.1-se             258           139264      0      1
 allocator@2.0-s              257  composer@2.1-se             258          3686400      0      1
 allocator@2.0-s              257  composer@2.1-se             258          3686400      0      1
 allocator@2.0-s              257  composer@2.1-se             258          3686400      0      1
 allocator@2.0-s              257  composer@2.1-se             258           139264      0      1
----------------------------------------------------
  total orphaned         18849792
           total         18849792
   deferred free                0
----------------------------------------------------
4 order 8 highmem pages in uncached pool = 4194304 total
2 order 8 lowmem pages in uncached pool = 2097152 total
14 order 4 highmem pages in uncached pool = 917504 total
0 order 4 lowmem pages in uncached pool = 0 total
0 order 0 highmem pages in uncached pool = 0 total
838 order 0 lowmem pages in uncached pool = 3432448 total
0 order 8 highmem pages in cached pool = 0 total
0 order 8 lowmem pages in cached pool = 0 total
0 order 4 highmem pages in cached pool = 0 total
0 order 4 lowmem pages in cached pool = 0 total
0 order 0 highmem pages in cached pool = 0 total
0 order 0 lowmem pages in cached pool = 0 total