Mirror of https://github.com/hardkernel/linux.git (synced 2026-03-25 20:10:23 +09:00)
Revert "mm: fix struct page layout on 32-bit systems"
This reverts commit cfddf6a685 as it
breaks the kernel abi at the moment. It will be restored at a later
point in time.
Bug: 161946584
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: Ic4dd11b89bcfd8ded25e4cb39f16967b22f5fa88
@@ -98,10 +98,10 @@ struct page {
 		};
 		struct {	/* page_pool used by netstack */
 			/**
-			 * @dma_addr: might require a 64-bit value on
+			 * @dma_addr: might require a 64-bit value even on
 			 * 32-bit architectures.
 			 */
-			unsigned long dma_addr[2];
+			dma_addr_t dma_addr;
 		};
 		struct {	/* slab, slob and slub */
 			union {
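For context on the struct change above: the patch being reverted stores the DMA address in two unsigned longs because, on 32-bit architectures whose ABI aligns 64-bit integers to 8 bytes (such as 32-bit Arm), a dma_addr_t member forces 8-byte alignment on the containing union, inserting padding and enlarging struct page. The revert restores the single dma_addr_t field so the previously frozen layout is preserved, as the commit message above explains. A minimal userspace sketch of the alignment effect, using simplified stand-in structs rather than the kernel's struct page:

/* Build for a 32-bit target whose ABI aligns 64-bit integers to 8 bytes
 * (e.g. 32-bit Arm) to see the size/alignment difference. */
#include <stdint.h>
#include <stdio.h>

typedef uint64_t dma_addr_t;	/* stand-in: 64-bit DMA addresses */

struct page_with_dma_addr_t {	/* stand-in, not the kernel's struct page */
	unsigned long flags;
	union {
		struct { void *next, *prev; } lru_like;
		struct { dma_addr_t dma_addr; } pp;	/* may need 8-byte alignment */
	} u;
};

struct page_with_two_words {
	unsigned long flags;
	union {
		struct { void *next, *prev; } lru_like;
		struct { unsigned long dma_addr[2]; } pp;	/* word-aligned */
	} u;
};

int main(void)
{
	printf("dma_addr_t member: size=%zu align=%zu\n",
	       sizeof(struct page_with_dma_addr_t),
	       _Alignof(struct page_with_dma_addr_t));
	printf("two-word member:   size=%zu align=%zu\n",
	       sizeof(struct page_with_two_words),
	       _Alignof(struct page_with_two_words));
	return 0;
}

Built for such a 32-bit target, the first struct reports a larger size and stricter alignment than the second; on 64-bit targets the two come out identical.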
@@ -191,17 +191,7 @@ static inline void page_pool_recycle_direct(struct page_pool *pool,
 
 static inline dma_addr_t page_pool_get_dma_addr(struct page *page)
 {
-	dma_addr_t ret = page->dma_addr[0];
-	if (sizeof(dma_addr_t) > sizeof(unsigned long))
-		ret |= (dma_addr_t)page->dma_addr[1] << 16 << 16;
-	return ret;
-}
-
-static inline void page_pool_set_dma_addr(struct page *page, dma_addr_t addr)
-{
-	page->dma_addr[0] = addr;
-	if (sizeof(dma_addr_t) > sizeof(unsigned long))
-		page->dma_addr[1] = upper_32_bits(addr);
+	return page->dma_addr;
 }
 
 static inline bool is_page_pool_compiled_in(void)
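The two inline helpers removed above are how the patch being reverted split and reassembled the address: the high word is shifted up by 16 twice rather than by 32, so the expression stays a valid shift even when dma_addr_t is only 32 bits wide and the branch is dead. A minimal userspace sketch of that round trip, with stand-in types rather than the kernel's (fake_page and the dma_addr_t typedef here are illustrative only):

#include <stdint.h>
#include <stdio.h>

typedef uint64_t dma_addr_t;		/* stand-in: assume 64-bit DMA addresses */

struct fake_page {			/* stand-in, not the kernel's struct page */
	unsigned long dma_addr[2];	/* [0] = low word, [1] = high word */
};

static dma_addr_t get_dma_addr(const struct fake_page *page)
{
	dma_addr_t ret = page->dma_addr[0];

	if (sizeof(dma_addr_t) > sizeof(unsigned long))
		/* "<< 16 << 16" instead of "<< 32": keeps the shift in range
		 * even when this branch is dead because dma_addr_t is 32-bit. */
		ret |= (dma_addr_t)page->dma_addr[1] << 16 << 16;

	return ret;
}

static void set_dma_addr(struct fake_page *page, dma_addr_t addr)
{
	page->dma_addr[0] = (unsigned long)addr;
	if (sizeof(dma_addr_t) > sizeof(unsigned long))
		page->dma_addr[1] = (unsigned long)(addr >> 16 >> 16);
}

int main(void)
{
	struct fake_page page = { { 0, 0 } };

	set_dma_addr(&page, 0x123456789abcdef0ULL);
	printf("round trip: %#llx\n",
	       (unsigned long long)get_dma_addr(&page));
	return 0;
}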
@@ -172,10 +172,8 @@ static void page_pool_dma_sync_for_device(struct page_pool *pool,
 					  struct page *page,
 					  unsigned int dma_sync_size)
 {
-	dma_addr_t dma_addr = page_pool_get_dma_addr(page);
-
 	dma_sync_size = min(dma_sync_size, pool->p.max_len);
-	dma_sync_single_range_for_device(pool->p.dev, dma_addr,
+	dma_sync_single_range_for_device(pool->p.dev, page->dma_addr,
 					 pool->p.offset, dma_sync_size,
 					 pool->p.dma_dir);
 }
@@ -226,7 +224,7 @@ static struct page *__page_pool_alloc_pages_slow(struct page_pool *pool,
 		put_page(page);
 		return NULL;
 	}
-	page_pool_set_dma_addr(page, dma);
+	page->dma_addr = dma;
 
 	if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV)
 		page_pool_dma_sync_for_device(pool, page, pool->p.max_len);
@@ -294,13 +292,13 @@ void page_pool_release_page(struct page_pool *pool, struct page *page)
 		 */
 		goto skip_dma_unmap;
 
-	dma = page_pool_get_dma_addr(page);
+	dma = page->dma_addr;
 
-	/* When page is unmapped, it cannot be returned to our pool */
+	/* When page is unmapped, it cannot be returned our pool */
 	dma_unmap_page_attrs(pool->p.dev, dma,
 			     PAGE_SIZE << pool->p.order, pool->p.dma_dir,
 			     DMA_ATTR_SKIP_CPU_SYNC);
-	page_pool_set_dma_addr(page, 0);
+	page->dma_addr = 0;
 skip_dma_unmap:
 	/* This may be the last page returned, releasing the pool, so
 	 * it is not safe to reference pool afterwards.