dmapool: move debug code to own functions
Clean up the normal path by moving the debug code outside it.
Link: https://lkml.kernel.org/r/20230126215125.4069751-7-kbusch@meta.com
Fixes: 2d55c16c0c ("dmapool: create/destroy cleanup")
Signed-off-by: Keith Busch <kbusch@kernel.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Tony Battersby <tonyb@cybernetics.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
commit d93e08b755 (parent 290911c56f), committed by Andrew Morton

mm/dmapool.c: 128 lines changed
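Before the diff itself, a minimal standalone sketch of the pattern the commit applies: the debug-only work lives in its own helpers guarded by a compile-time macro, with empty stubs in the #else branch, so callers on the normal path contain no #ifdef blocks. Everything below (TOY_DEBUG, check_block, poison_block, the sizes) is illustrative and not the kernel's dmapool code.

#include <stdio.h>
#include <string.h>

#define BLOCK_SIZE   32
#define POISON_FREED 0xa7

#ifdef TOY_DEBUG
/* Warn if a block handed back by the allocator lost its free-poison pattern. */
static void check_block(unsigned char *block)
{
        for (size_t i = 0; i < BLOCK_SIZE; i++) {
                if (block[i] != POISON_FREED) {
                        fprintf(stderr, "block %p corrupted at byte %zu\n",
                                (void *)block, i);
                        return;
                }
        }
}

/* Re-poison a block as it returns to the free list. */
static void poison_block(unsigned char *block)
{
        memset(block, POISON_FREED, BLOCK_SIZE);
}
#else
/* No-op stubs: call sites read the same whether debugging is compiled in or not. */
static void check_block(unsigned char *block) { (void)block; }
static void poison_block(unsigned char *block) { (void)block; }
#endif

int main(void)
{
        unsigned char block[BLOCK_SIZE];

        poison_block(block);    /* block goes "back to the pool" */
        check_block(block);     /* block is "allocated" again; silent unless corrupted */
        return 0;
}

Build with -DTOY_DEBUG to compile the checks in; without it the helpers collapse to empty functions and the callers stay untouched, which is the cleanup the commit performs on dma_pool_alloc() and dma_pool_free().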
--- a/mm/dmapool.c
+++ b/mm/dmapool.c
@@ -96,6 +96,78 @@ static ssize_t pools_show(struct device *dev, struct device_attribute *attr, cha
 
 static DEVICE_ATTR_RO(pools);
 
+#ifdef DMAPOOL_DEBUG
+static void pool_check_block(struct dma_pool *pool, void *retval,
+                             unsigned int offset, gfp_t mem_flags)
+{
+        int i;
+        u8 *data = retval;
+        /* page->offset is stored in first 4 bytes */
+        for (i = sizeof(offset); i < pool->size; i++) {
+                if (data[i] == POOL_POISON_FREED)
+                        continue;
+                dev_err(pool->dev, "%s %s, %p (corrupted)\n",
+                        __func__, pool->name, retval);
+
+                /*
+                 * Dump the first 4 bytes even if they are not
+                 * POOL_POISON_FREED
+                 */
+                print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1,
+                                data, pool->size, 1);
+                break;
+        }
+        if (!want_init_on_alloc(mem_flags))
+                memset(retval, POOL_POISON_ALLOCATED, pool->size);
+}
+
+static bool pool_page_err(struct dma_pool *pool, struct dma_page *page,
+                          void *vaddr, dma_addr_t dma)
+{
+        unsigned int offset = vaddr - page->vaddr;
+        unsigned int chain = page->offset;
+
+        if ((dma - page->dma) != offset) {
+                dev_err(pool->dev, "%s %s, %p (bad vaddr)/%pad\n",
+                        __func__, pool->name, vaddr, &dma);
+                return true;
+        }
+
+        while (chain < pool->allocation) {
+                if (chain != offset) {
+                        chain = *(int *)(page->vaddr + chain);
+                        continue;
+                }
+                dev_err(pool->dev, "%s %s, dma %pad already free\n",
+                        __func__, pool->name, &dma);
+                return true;
+        }
+        memset(vaddr, POOL_POISON_FREED, pool->size);
+        return false;
+}
+
+static void pool_init_page(struct dma_pool *pool, struct dma_page *page)
+{
+        memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
+}
+#else
+static void pool_check_block(struct dma_pool *pool, void *retval,
+                             unsigned int offset, gfp_t mem_flags)
+
+{
+}
+
+static bool pool_page_err(struct dma_pool *pool, struct dma_page *page,
+                          void *vaddr, dma_addr_t dma)
+{
+        return false;
+}
+
+static void pool_init_page(struct dma_pool *pool, struct dma_page *page)
+{
+}
+#endif
+
 /**
  * dma_pool_create - Creates a pool of consistent memory blocks, for dma.
  * @name: name of pool, for diagnostics
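A side note on the helpers added above: pool_check_block() skips the first sizeof(offset) bytes because dmapool keeps its free list inside the page, with each free block's first bytes holding the offset of the next free block, and pool_page_err() detects a double free by walking that chain. The following is a rough, simplified illustration of such an offset-chained free list; the toy_page type, field names, and sizes are illustrative, not the kernel's struct dma_page.

#include <assert.h>
#include <stdbool.h>

struct toy_page {
        unsigned char *vaddr;     /* backing memory of the page */
        unsigned int allocation;  /* total page size in bytes */
        unsigned int size;        /* block size in bytes */
        unsigned int offset;      /* offset of the first free block */
};

/* Chain every block into the initial free list: block N points to block N+1. */
static void toy_init_page(struct toy_page *page)
{
        unsigned int off = 0, next;

        do {
                next = off + page->size;
                *(unsigned int *)(page->vaddr + off) = next;
                off = next;
        } while (off < page->allocation);
        page->offset = 0;
}

/* True if the block at @offset is already on the free chain (a double free). */
static bool toy_block_is_free(struct toy_page *page, unsigned int offset)
{
        unsigned int chain = page->offset;

        while (chain < page->allocation) {
                if (chain == offset)
                        return true;
                chain = *(unsigned int *)(page->vaddr + chain);
        }
        return false;
}

int main(void)
{
        unsigned char mem[256];
        struct toy_page page = { .vaddr = mem, .allocation = 256, .size = 32 };

        toy_init_page(&page);
        assert(toy_block_is_free(&page, 64));   /* fresh page: every block is free */

        /* "Allocate" the first block by popping the list head, as dma_pool_alloc does. */
        page.offset = *(unsigned int *)(page.vaddr + page.offset);
        assert(!toy_block_is_free(&page, 0));   /* block 0 has left the chain */
        return 0;
}

In the real pool the same idea applies under pool->lock: dma_pool_alloc() pops page->offset, and dma_pool_free() pushes the block back by storing the old head into its first bytes, which is why those bytes are excluded from the poison check.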
@@ -223,9 +295,7 @@ static struct dma_page *pool_alloc_page(struct dma_pool *pool, gfp_t mem_flags)
 	page->vaddr = dma_alloc_coherent(pool->dev, pool->allocation,
 					 &page->dma, mem_flags);
 	if (page->vaddr) {
-#ifdef DMAPOOL_DEBUG
-		memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
-#endif
+		pool_init_page(pool, page);
 		pool_initialise_page(pool, page);
 		page->in_use = 0;
 		page->offset = 0;
@@ -245,9 +315,7 @@ static void pool_free_page(struct dma_pool *pool, struct dma_page *page)
 {
 	dma_addr_t dma = page->dma;
 
-#ifdef DMAPOOL_DEBUG
-	memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
-#endif
+	pool_init_page(pool, page);
 	dma_free_coherent(pool->dev, pool->allocation, page->vaddr, dma);
 	list_del(&page->page_list);
 	kfree(page);
@@ -336,29 +404,7 @@ void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
 	page->offset = *(int *)(page->vaddr + offset);
 	retval = offset + page->vaddr;
 	*handle = offset + page->dma;
-#ifdef DMAPOOL_DEBUG
-	{
-		int i;
-		u8 *data = retval;
-		/* page->offset is stored in first 4 bytes */
-		for (i = sizeof(page->offset); i < pool->size; i++) {
-			if (data[i] == POOL_POISON_FREED)
-				continue;
-			dev_err(pool->dev, "%s %s, %p (corrupted)\n",
-				__func__, pool->name, retval);
-
-			/*
-			 * Dump the first 4 bytes even if they are not
-			 * POOL_POISON_FREED
-			 */
-			print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1,
-					data, pool->size, 1);
-			break;
-		}
-	}
-	if (!want_init_on_alloc(mem_flags))
-		memset(retval, POOL_POISON_ALLOCATED, pool->size);
-#endif
+	pool_check_block(pool, retval, offset, mem_flags);
 	spin_unlock_irqrestore(&pool->lock, flags);
 
 	if (want_init_on_alloc(mem_flags))
@@ -394,7 +440,6 @@ void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
 {
 	struct dma_page *page;
 	unsigned long flags;
-	unsigned int offset;
 
 	spin_lock_irqsave(&pool->lock, flags);
 	page = pool_find_page(pool, dma);
@@ -405,35 +450,16 @@ void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
 		return;
 	}
 
-	offset = vaddr - page->vaddr;
 	if (want_init_on_free())
 		memset(vaddr, 0, pool->size);
-#ifdef DMAPOOL_DEBUG
-	if ((dma - page->dma) != offset) {
+	if (pool_page_err(pool, page, vaddr, dma)) {
 		spin_unlock_irqrestore(&pool->lock, flags);
-		dev_err(pool->dev, "%s %s, %p (bad vaddr)/%pad\n",
-			__func__, pool->name, vaddr, &dma);
 		return;
 	}
-	{
-		unsigned int chain = page->offset;
-		while (chain < pool->allocation) {
-			if (chain != offset) {
-				chain = *(int *)(page->vaddr + chain);
-				continue;
-			}
-			spin_unlock_irqrestore(&pool->lock, flags);
-			dev_err(pool->dev, "%s %s, dma %pad already free\n",
-				__func__, pool->name, &dma);
-			return;
-		}
-	}
-	memset(vaddr, POOL_POISON_FREED, pool->size);
-#endif
 
 	page->in_use--;
 	*(int *)vaddr = page->offset;
-	page->offset = offset;
+	page->offset = vaddr - page->vaddr;
 	/*
 	 * Resist a temptation to do
 	 * if (!is_page_busy(page)) pool_free_page(pool, page);