Files
linux/fs/squashfs/block.c
Tao Zeng 17ee56708e fs: fix crash in squash fs [1/1]
PD#SWPL-37734

Problem:
crash in squashfs caused by incorrect put_page reference counting

Solution:
1, remove modify of put page;
2, revert following changes introduced by google:
	commit 60cc09a9e3
	Author: Adrien Schildknecht <adriens@google.com>
	Date:   Thu Sep 29 15:25:30 2016 -0700

	ANDROID: Squashfs: optimize reading uncompressed data

	When dealing with uncompressed data, there is no need to read a whole
	block (default 128K) to get the desired page: the pages are
	independent from each other.

	This patch changes the readpages logic so that reading uncompressed
	data only reads the number of pages advised by the readahead algorithm.

	Moreover, if the page actor contains holes (i.e. pages that are already
		up-to-date), squashfs skips the buffer_head associated to those pages.

	This patch greatly improves the performance of random reads for
	uncompressed files because squashfs only reads what is needed. It also
	reduces the number of unnecessary reads.

	Change-Id: I90a77343bb994a1de7482eb43eaf6d2021502c22
	Signed-off-by: Adrien Schildknecht <adriens@google.com>

	---------------------------------------------------
	commit d840c1d772
	Author: Adrien Schildknecht <adrien+dev@schischi.me>
	Date:   Fri Oct 14 21:03:54 2016 -0700

	ANDROID: Squashfs: implement .readpages()

	Squashfs does not implement .readpages(), so the kernel just repeatedly
	calls .readpage().

	The readpages function tries to pack as many pages as possible in the
	same page actor so that only 1 read request is issued.

	Now that the read requests are asynchronous, the kernel can truly
	prefetch pages using its readahead algorithm.

	Change-Id: I65b9aa2ddc9444aaf9ccf60781172ccca0f3f518
	Signed-off-by: Adrien Schildknecht <adriens@google.com>

	---------------------------------------------------
	ANDROID: Squashfs: replace buffer_head with BIO
	The 'll_rw_block' has been deprecated and BIO is now the basic container
	for block I/O within the kernel.

	Switching to BIO offers 2 advantages:
	1/ It removes synchronous wait for the up-to-date buffers: SquashFS
	now deals with decompressions/copies asynchronously.
	Implementing an asynchronous mechanism to read data is needed to
	efficiently implement .readpages().
	2/ Prior to this patch, merging the read requests entirely depends on
	the IO scheduler. SquashFS has more information than the IO
	scheduler about what could be merged. Moreover, merging the reads
	at the FS level means that we rely less on the IO scheduler.

	Change-Id: I668812cc1e78e2f92497f9ebe0157cb8eec725ba
	Signed-off-by: Adrien Schildknecht <adriens@google.com>

Verify:
t318

Change-Id: I9ea62393066122cd720f41d50bde6cdf925fb06a
Signed-off-by: Tao Zeng <tao.zeng@amlogic.com>
2023-04-21 13:52:37 +09:00

215 lines
5.5 KiB
C

/*
* Squashfs - a compressed read only filesystem for Linux
*
* Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008
* Phillip Lougher <phillip@squashfs.org.uk>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2,
* or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
* block.c
*/
/*
* This file implements the low-level routines to read and decompress
* datablocks and metadata blocks.
*/
#include <linux/fs.h>
#include <linux/vfs.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
#include "squashfs_fs.h"
#include "squashfs_fs_sb.h"
#include "squashfs.h"
#include "decompressor.h"
#include "page_actor.h"
/*
* Read the metadata block length, this is stored in the first two
* bytes of the metadata block.
*/
/*
 * Fetch the two-byte little-endian length field that prefixes every
 * metadata block.  The field may straddle a device block boundary, in
 * which case a second device block is read and *cur_index / *offset are
 * advanced past it.  Returns the buffer holding the byte following the
 * length field (caller releases it with put_bh()), or NULL on read
 * failure.
 */
static struct buffer_head *get_block_length(struct super_block *sb,
			u64 *cur_index, int *offset, int *length)
{
	struct squashfs_sb_info *msblk = sb->s_fs_info;
	struct buffer_head *bh = sb_bread(sb, *cur_index);

	if (bh == NULL)
		return NULL;

	if (msblk->devblksize - *offset != 1) {
		/* Both length bytes live in the current device block */
		*length = (unsigned char) bh->b_data[*offset] |
			(unsigned char) bh->b_data[*offset + 1] << 8;
		*offset += 2;

		/* Advance to the next device block if we consumed this one */
		if (*offset == msblk->devblksize) {
			put_bh(bh);
			bh = sb_bread(sb, ++(*cur_index));
			if (bh == NULL)
				return NULL;
			*offset = 0;
		}
	} else {
		/* Length field is split across two device blocks */
		*length = (unsigned char) bh->b_data[*offset];
		put_bh(bh);
		bh = sb_bread(sb, ++(*cur_index));
		if (bh == NULL)
			return NULL;
		*length |= (unsigned char) bh->b_data[0] << 8;
		*offset = 1;
	}

	return bh;
}
/*
* Read and decompress a metadata block or datablock. Length is non-zero
* if a datablock is being read (the size is stored elsewhere in the
* filesystem), otherwise the length is obtained from the first two bytes of
* the metadata block. A bit in the length field indicates if the block
* is stored uncompressed in the filesystem (usually because compression
* generated a larger block - this does occasionally happen with compression
* algorithms).
*/
/*
 * squashfs_read_data - read (and decompress) one block into @output.
 * @sb:		superblock of the mounted squashfs
 * @index:	byte offset of the block within the filesystem image
 * @length:	encoded length for a datablock, or 0 for a metadata block
 *		(metadata length is read from the block's 2-byte prefix)
 * @next_index:	if non-NULL, set to the byte offset just past this block
 * @output:	page actor receiving the (decompressed) data
 *
 * Returns the number of bytes placed in @output, -ENOMEM on allocation
 * failure, or -EIO on any read/validation error.
 */
int squashfs_read_data(struct super_block *sb, u64 index, int length,
		u64 *next_index, struct squashfs_page_actor *output)
{
	struct squashfs_sb_info *msblk = sb->s_fs_info;
	struct buffer_head **bh;
	/* byte offset of 'index' within its device block */
	int offset = index & ((1 << msblk->devblksize_log2) - 1);
	/* device block number containing 'index' */
	u64 cur_index = index >> msblk->devblksize_log2;
	int bytes, compressed, b = 0, k = 0, avail, i;

	/*
	 * Worst case number of device blocks an output->length span can
	 * touch, plus one extra slot used by the metadata path for the
	 * buffer returned by get_block_length().
	 */
	bh = kcalloc(((output->length + msblk->devblksize - 1)
		>> msblk->devblksize_log2) + 1, sizeof(*bh), GFP_KERNEL);
	if (bh == NULL)
		return -ENOMEM;

	if (length) {
		/*
		 * Datablock: size and compressed flag are encoded in the
		 * 'length' argument (stored elsewhere in the filesystem).
		 */
		/* start negative so the loop also covers the leading offset */
		bytes = -offset;
		compressed = SQUASHFS_COMPRESSED_BLOCK(length);
		length = SQUASHFS_COMPRESSED_SIZE_BLOCK(length);
		if (next_index)
			*next_index = index + length;

		TRACE("Block @ 0x%llx, %scompressed size %d, src size %d\n",
			index, compressed ? "" : "un", length, output->length);

		/* sanity-check the on-disk length before trusting it */
		if (length < 0 || length > output->length ||
				(index + length) > msblk->bytes_used)
			goto read_failure;

		/* grab every device block the datablock spans */
		for (b = 0; bytes < length; b++, cur_index++) {
			bh[b] = sb_getblk(sb, cur_index);
			if (bh[b] == NULL)
				goto block_release;
			bytes += msblk->devblksize;
		}
		/* submit all reads at once; completion awaited below */
		ll_rw_block(REQ_OP_READ, 0, b, bh);
	} else {
		/*
		 * Metadata block: length and compressed flag come from the
		 * 2-byte prefix read by get_block_length().
		 */
		if ((index + 2) > msblk->bytes_used)
			goto read_failure;

		/* bh[0] already contains (part of) the data; it is owned
		 * by us from here on and freed via block_release on error */
		bh[0] = get_block_length(sb, &cur_index, &offset, &length);
		if (bh[0] == NULL)
			goto read_failure;
		b = 1;

		bytes = msblk->devblksize - offset;
		compressed = SQUASHFS_COMPRESSED(length);
		length = SQUASHFS_COMPRESSED_SIZE(length);
		if (next_index)
			*next_index = index + length + 2;

		TRACE("Block @ 0x%llx, %scompressed size %d\n", index,
			compressed ? "" : "un", length);

		if (length < 0 || length > output->length ||
					(index + length) > msblk->bytes_used)
			goto block_release;

		/* grab the remaining device blocks after bh[0] */
		for (; bytes < length; b++) {
			bh[b] = sb_getblk(sb, ++cur_index);
			if (bh[b] == NULL)
				goto block_release;
			bytes += msblk->devblksize;
		}
		/* bh[0] was read synchronously by get_block_length();
		 * only submit reads for bh[1..b-1] */
		ll_rw_block(REQ_OP_READ, 0, b - 1, bh + 1);
	}

	/* wait for all submitted reads to complete and validate them */
	for (i = 0; i < b; i++) {
		wait_on_buffer(bh[i]);
		if (!buffer_uptodate(bh[i]))
			goto block_release;
	}

	if (compressed) {
		length = squashfs_decompress(msblk, bh, b, offset, length,
			output);
		/*
		 * NOTE(review): on failure this jumps past the put_bh loop,
		 * so the decompressor is assumed to release the buffer_heads
		 * it was handed (matches mainline behaviour) — confirm
		 * against the decompressor implementations.
		 */
		if (length < 0)
			goto read_failure;
	} else {
		/*
		 * Block is uncompressed: copy straight from the
		 * buffer_heads into the page actor's pages.
		 */
		int in, pg_offset = 0;
		void *data = squashfs_first_page(output);

		for (bytes = length; k < b; k++) {
			/* bytes still available in this device block */
			in = min(bytes, msblk->devblksize - offset);
			bytes -= in;
			while (in) {
				/* current page full — advance the actor */
				if (pg_offset == PAGE_SIZE) {
					data = squashfs_next_page(output);
					pg_offset = 0;
				}
				avail = min_t(int, in, PAGE_SIZE -
						pg_offset);
				memcpy(data + pg_offset, bh[k]->b_data + offset,
						avail);
				in -= avail;
				pg_offset += avail;
				offset += avail;
			}
			/* only the first device block has a non-zero offset */
			offset = 0;
			put_bh(bh[k]);
		}
		squashfs_finish_page(output);
	}

	kfree(bh);
	return length;

block_release:
	/* release every buffer_head not yet consumed (k..b-1) */
	for (; k < b; k++)
		put_bh(bh[k]);

read_failure:
	ERROR("squashfs_read_data failed to read block 0x%llx\n",
					(unsigned long long) index);
	kfree(bh);
	return -EIO;
}